Adding all project files

Commit cd4316ad0f (parent 6c9e127bdc)
42289 changed files with 8009643 additions and 0 deletions
venv/Lib/site-packages/torch/distributed/_tools/__init__.py (new file, 12 lines added)
@@ -0,0 +1,12 @@
from .fsdp2_mem_tracker import FSDPMemTracker
from .mem_tracker import MemTracker
from .memory_tracker import MemoryTracker
from .mod_tracker import ModTracker
from .runtime_estimator import RuntimeEstimator
from .sac_estimator import (
    MSPS,
    SACEstimator,
    SACGreedyOrderMeta,
    SACStats,
    SACTradeOffStats,
)
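# Illustrative sketch (not part of the commit): the package __init__ above re-exports
# the tracker entry points, so downstream code can import them directly from
# torch.distributed._tools. This assumes a PyTorch build that ships these private
# tooling modules.
from torch.distributed._tools import MemTracker, ModTracker  # noqa: F401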
11 binary files not shown.
venv/Lib/site-packages/torch/distributed/_tools/common_utils.py (new file, 33 lines added)
@@ -0,0 +1,33 @@
import warnings

import torch
from torch.utils._python_dispatch import is_traceable_wrapper_subclass


def get_untyped_storages(t: torch.Tensor) -> set[torch.UntypedStorage]:
    """
    Recursively extracts untyped storages from a tensor or its subclasses.

    Args:
        t (torch.Tensor): The tensor to extract storages from.

    Returns:
        Set[torch.UntypedStorage]: A set of untyped storages.
    """
    unflattened_tensors = [t]
    flattened_tensor_storages = set()
    while len(unflattened_tensors) > 0:
        obj = unflattened_tensors.pop()
        if is_traceable_wrapper_subclass(obj):
            attrs, _ = obj.__tensor_flatten__()  # type: ignore[attr-defined]
            unflattened_tensors.extend([getattr(obj, attr) for attr in attrs])
        else:
            if not hasattr(obj, "untyped_storage"):
                warnings.warn(
                    f"Expected a tensor or a traceable wrapper-subclass of tensor, but got {type(obj)}",
                    category=UserWarning,
                    stacklevel=2,
                )
            else:
                flattened_tensor_storages.add(obj.untyped_storage())
    return flattened_tensor_storages
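# Illustrative sketch (not part of the commit): get_untyped_storages() above can be used
# to measure how many bytes of storage a tensor actually owns; for traceable wrapper
# subclasses (e.g. DTensor) it recurses into the inner tensors. The import path assumes
# the module location shown in this diff, and the tensor shape is arbitrary.
import torch
from torch.distributed._tools.common_utils import get_untyped_storages

t = torch.randn(1024, 1024)
storages = get_untyped_storages(t)
total_bytes = sum(st.nbytes() for st in storages)
print(f"{len(storages)} storage(s) totalling {total_bytes} bytes")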
venv/Lib/site-packages/torch/distributed/_tools/fake_collectives.py (new file, 307 lines added)
@@ -0,0 +1,307 @@
import random
from typing import Any

import torch
from torch._C._distributed_c10d import (
    _resolve_process_group,
    FakeWork,
    ProcessGroup,
    Work,
)
from torch.utils._pytree import tree_map_only


torch.distributed.batch_isend_irecv

c10d = torch.ops.c10d
_c10d_functional = torch.ops._c10d_functional
_c10d_functional_autograd = torch.ops._c10d_functional_autograd
_dtensor = torch.ops._dtensor
used_ids: set[int] = set()


def generate_unique_id() -> int:
    while True:
        new_id = random.randint(1, 10**9)
        if new_id not in used_ids:
            used_ids.add(new_id)
            return new_id


# Function to create and return FakeWork object
def create_fakework(args, return_first_arg=True):  # type: ignore[no-untyped-def]
    work = FakeWork()
    work.seq_id = generate_unique_id()
    fakework_script_obj = work.boxed()
    return (args[0], fakework_script_obj) if return_first_arg else fakework_script_obj


# Dictionary mapping collective operations to their meta functions
# All 20 ops from torch.csrc.distributed.c10d.Ops.cpp are included
# _DEPRECATED_META_FUNCTIONS = {
#     "allreduce_coalesced_": lambda *args: create_fakework(args, return_first_arg=False),
#     "allgather_coalesced_": lambda *args: create_fakework(args, return_first_arg=False),
#     "allgather_into_tensor_coalesced_": lambda *args: create_fakework(args, return_first_arg=False),
#     "reduce_scatter_tensor_coalesced_": lambda *args: create_fakework(args, return_first_arg=False),
# }
_META_FUNCTIONS = {
    "broadcast_": lambda *args: create_fakework(args),
    "allreduce_": lambda *args: create_fakework(args),
    "allgather_": lambda *args: create_fakework(args),
    "_allgather_base_": lambda *args: create_fakework(args),
    "reduce_scatter_": lambda *args: create_fakework(args),
    "_reduce_scatter_base_": lambda *args: create_fakework(args),
    "reduce_": lambda *args: create_fakework(args, return_first_arg=False),
    "gather_": lambda *args: create_fakework(args, return_first_arg=False),
    "scatter_": lambda *args: create_fakework(args),
    "alltoall_": lambda *args: create_fakework(args),
    "alltoall_base_": lambda *args: create_fakework(args, return_first_arg=False),
    "barrier": lambda *args: create_fakework(args, return_first_arg=False),
    "monitored_barrier_": lambda *args: None,
    "send": lambda *args: create_fakework(args, return_first_arg=False),
    "recv_": lambda *args: create_fakework(args, return_first_arg=False),
    "recv_any_source_": lambda *args: create_fakework(args, return_first_arg=False),
}

if not torch._running_with_deploy():
    lib_impl = torch.library.Library("c10d", "IMPL")  # noqa: TOR901
    for op, meta_func in _META_FUNCTIONS.items():
        lib_impl.impl(op, meta_func, "Meta")

# List of collective operation functions including functional collectives
# Note: The following collectives might be deprecated soon hence not adding them
# deprecated_non_functional_collectives = [
#     c10d.allreduce_coalesced_.default,
#     c10d.reduce_scatter_tensor_coalesced_.default,
#     c10d.allgather_into_tensor_coalesced_.default,
#     c10d.allgather_coalesced_.default,
# ]
non_functional_collectives: set[torch._ops.OpOverload] = {
    c10d.broadcast_.default,
    c10d.allreduce_.default,
    c10d.reduce_.default,
    c10d.send.default,
    c10d.recv_.default,
    c10d.recv_any_source_.default,
    c10d.allgather_.default,
    c10d.reduce_scatter_.default,
    c10d._reduce_scatter_base_.default,
    c10d._allgather_base_.default,
    c10d.gather_.default,
    c10d.scatter_.default,
    c10d.alltoall_.default,
    c10d.alltoall_base_.default,
    c10d.barrier.default,
    c10d.monitored_barrier_.default,
}
functional_collectives: set[torch._ops.OpOverload] = {
    _c10d_functional.broadcast.default,
    _c10d_functional.all_reduce.default,
    _c10d_functional.all_gather_into_tensor.default,
    _c10d_functional.reduce_scatter_tensor.default,
    _c10d_functional.all_to_all_single.default,
    _c10d_functional_autograd.all_to_all_single.default,
    _c10d_functional.wait_tensor.default,
    _c10d_functional.all_reduce_.default,
    _c10d_functional.all_reduce_coalesced.default,
    _c10d_functional.all_reduce_coalesced_.default,
    _c10d_functional.all_gather_into_tensor_out.default,
    _c10d_functional.all_gather_into_tensor_coalesced.default,
    _c10d_functional_autograd.all_gather_into_tensor.default,
    _c10d_functional.reduce_scatter_tensor_coalesced.default,
    _c10d_functional_autograd.reduce_scatter_tensor.default,
    _c10d_functional.broadcast_.default,
    _dtensor.shard_dim_alltoall.default,
}

sync_ops: set[torch._ops.OpOverload] = {
    c10d.barrier.default,
    c10d.monitored_barrier_.default,
    _c10d_functional.wait_tensor.default,
}

collective_ops = set.union(functional_collectives, non_functional_collectives)


class CollectiveOp:
    # Static sets for performance optimization
    PG_ARG_1 = {
        c10d.broadcast_.default,
        c10d.allreduce_.default,
        c10d.reduce_.default,
        c10d.send.default,
        c10d.recv_.default,
        c10d.recv_any_source_.default,
        c10d.barrier.default,
        # c10d.allreduce_coalesced_.default
    }

    PG_ARG_2 = {
        c10d.allgather_.default,
        c10d._allgather_base_.default,
        c10d.reduce_scatter_.default,
        c10d._reduce_scatter_base_.default,
        c10d.gather_.default,
        c10d.scatter_.default,
        c10d.alltoall_.default,
        c10d.alltoall_base_.default,
        # c10d.allgather_coalesced_.default,
        # c10d.allgather_into_tensor_coalesced_.default
        # c10d.reduce_scatter_tensor_coalesced_.default
    }

    PG_ARG_3 = {
        _c10d_functional.broadcast.default,
        _c10d_functional.broadcast_.default,
        _c10d_functional.all_reduce.default,
        _c10d_functional.all_reduce_.default,
        _c10d_functional.all_reduce_coalesced.default,
        _c10d_functional.all_reduce_coalesced_.default,
        _c10d_functional.all_gather_into_tensor.default,
        _c10d_functional.all_gather_into_tensor_out.default,
        _c10d_functional_autograd.all_gather_into_tensor.default,
        _c10d_functional.all_gather_into_tensor_coalesced.default,
    }

    PG_ARG_4 = {
        _c10d_functional.reduce_scatter_tensor.default,
        _c10d_functional.reduce_scatter_tensor_coalesced.default,
        _c10d_functional_autograd.reduce_scatter_tensor.default,
        _c10d_functional.all_to_all_single.default,
        _c10d_functional_autograd.all_to_all_single.default,
        _dtensor.shard_dim_alltoall.default,
    }

    WK_ARG_1 = {
        c10d.broadcast_.default,
        c10d.allreduce_.default,
        c10d.allgather_.default,
        c10d.reduce_scatter_.default,
        c10d._reduce_scatter_base_.default,
        c10d._allgather_base_.default,
        c10d.scatter_.default,
        c10d.alltoall_.default,
    }

    WK = {
        c10d.send.default,
        c10d.recv_.default,
        c10d.recv_any_source_.default,
        c10d.reduce_.default,
        c10d.gather_.default,
        c10d.alltoall_base_.default,
        c10d.barrier.default,
    }

    COMM_TENSOR_ARG_0 = {
        c10d.allreduce_.default,
        c10d.send.default,
        c10d.recv_.default,
        c10d.recv_any_source_.default,
        c10d.allgather_.default,
        c10d.gather_.default,
        c10d.reduce_.default,
        c10d.broadcast_.default,
        _c10d_functional.all_reduce_coalesced.default,
        _c10d_functional.all_reduce_coalesced_.default,
        # c10d.allreduce_coalesced_.default
        # c10d.allgather_coalesced_.default
        # c10d.allgather_into_tensor_coalesced_.default,
    }

    COMM_TENSOR_ARG_1 = {
        c10d.reduce_scatter_.default,
        c10d.scatter_.default,
        # c10d.reduce_scatter_tensor_coalesced_.default,
    }

    COMM_TENSOR_ARG_RES = {
        _c10d_functional.all_gather_into_tensor.default,
        _c10d_functional_autograd.all_gather_into_tensor.default,
    }

    COMM_TENSOR_SINGLE_UNTYPED_STORAGE = {
        c10d._allgather_base_.default,
        _c10d_functional.broadcast.default,
        _c10d_functional.broadcast_.default,
        _c10d_functional.all_reduce.default,
        _c10d_functional.all_reduce_.default,
        _c10d_functional.reduce_scatter_tensor.default,
        _c10d_functional_autograd.reduce_scatter_tensor.default,
    }

    COMM_TENSOR_ARG_0_AND_RES = {
        _c10d_functional.all_to_all_single.default,
        _c10d_functional_autograd.all_to_all_single.default,
        _dtensor.shard_dim_alltoall.default,
    }

    COMM_TENSOR_RES_SUM = {
        _c10d_functional.all_gather_into_tensor_coalesced.default,
        _c10d_functional.reduce_scatter_tensor_coalesced.default,
    }

    @staticmethod
    def sum_tensors(arg: Any) -> int:
        """Calculate total memory consumed by the tensors in the argument."""
        total_memory = 0

        def sum_bytes(t: torch.Tensor) -> None:
            nonlocal total_memory
            total_memory += t.untyped_storage().nbytes()

        tree_map_only(torch.Tensor, sum_bytes, arg)
        return total_memory

    @staticmethod
    def get_process_group(func, args) -> ProcessGroup:  # type: ignore[no-untyped-def]
        """Retrieve the process group for collective operations, except `wait_tensor`."""
        if func in CollectiveOp.PG_ARG_1:
            return ProcessGroup.unbox(args[1])
        if func in CollectiveOp.PG_ARG_2:
            return ProcessGroup.unbox(args[2])
        if func in CollectiveOp.PG_ARG_3:
            return _resolve_process_group(args[2])
        if func in CollectiveOp.PG_ARG_4:
            return _resolve_process_group(args[3])
        raise TypeError(f"Func {func} not found in {collective_ops}")

    @staticmethod
    def get_comm_tensor_size(func, res, args, kwargs) -> int:  # type: ignore[no-untyped-def]
        """Compute the communication tensor size, except for `wait_tensor`, `barrier`, and `monitored_barrier`."""
        if func in CollectiveOp.COMM_TENSOR_ARG_0:
            return CollectiveOp.sum_tensors(args[0])
        if func in CollectiveOp.COMM_TENSOR_ARG_1:
            return CollectiveOp.sum_tensors(args[1])
        if func in CollectiveOp.COMM_TENSOR_ARG_RES:
            return res.untyped_storage().nbytes()
        if func in CollectiveOp.COMM_TENSOR_SINGLE_UNTYPED_STORAGE:
            return args[0].untyped_storage().nbytes()
        if func == c10d._reduce_scatter_base_.default:
            return args[1].untyped_storage().nbytes()
        if func == c10d.alltoall_.default:
            # TODO(@sanketpurandare) - Confirm size computation
            return max(
                CollectiveOp.sum_tensors(args[0]), CollectiveOp.sum_tensors(args[1])
            )
        if func == c10d.alltoall_base_.default:
            # TODO(@sanketpurandare) - Confirm size computation
            return max(
                args[0].untyped_storage().nbytes(), args[1].untyped_storage().nbytes()
            )
        if func == _c10d_functional.all_gather_into_tensor_out.default:
            return args[-1].untyped_storage().nbytes()
        if func in CollectiveOp.COMM_TENSOR_RES_SUM:
            return CollectiveOp.sum_tensors(res)
        if func in CollectiveOp.COMM_TENSOR_ARG_0_AND_RES:
            # TODO(@sanketpurandare) - Confirm size computation
            return args[0].untyped_storage().nbytes() + res.untyped_storage().nbytes()
        raise TypeError(f"Unknown function: {func} in {collective_ops}")

    @staticmethod
    def get_work(func, res) -> Work:  # type: ignore[no-untyped-def]
        if func in CollectiveOp.WK:
            return FakeWork.unbox(res)
        elif func in CollectiveOp.WK_ARG_1:
            return FakeWork.unbox(res[1])
        raise TypeError(f"Func {func} not found in {collective_ops}")
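# Illustrative sketch (not part of the commit): the Meta-kernel registrations above let
# c10d collectives be dispatched on fake/meta tensors and return a FakeWork handle instead
# of doing real communication. The snippet below only exercises CollectiveOp.sum_tensors,
# which needs no process group; the import path and tensor sizes are assumptions for
# illustration only.
import torch
from torch.distributed._tools.fake_collectives import CollectiveOp

bufs = [torch.empty(256, 256), torch.empty(128)]
print(CollectiveOp.sum_tensors(bufs))  # total bytes across both untyped storages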
venv/Lib/site-packages/torch/distributed/_tools/fsdp2_mem_tracker.py (new file, 547 lines added)
@@ -0,0 +1,547 @@
from copy import deepcopy
from enum import auto, Enum
from functools import partial, wraps
from typing import Any, Callable, NamedTuple, Optional, TypeVar, Union
from typing_extensions import ParamSpec, TypeVarTuple, Unpack

import torch
import torch.distributed._tools.fake_collectives
from torch import nn, optim
from torch._guards import active_fake_mode
from torch.distributed._tools.mem_tracker import _RefType, _State, MemTracker
from torch.distributed.fsdp import FSDPModule
from torch.distributed.fsdp._fully_shard._fsdp_param_group import FSDPParamGroup
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_map_only
from torch.utils.weak import WeakIdKeyDictionary, weakref


_TOTAL_KEY = "Total"

__all__ = ["FSDPMemTracker"]

_P = ParamSpec("_P")
_R = TypeVar("_R")
_Ts = TypeVarTuple("_Ts")

c10d = torch.ops.c10d


class _FSDPRefType(_RefType):
    """
    Enumerates categories of memory usage in FSDP modules, including parameters, gradients, activations,
    and optimizer states.

    Attributes:
        SHARDED_PARAM (str): Memory usage of sharded parameters.
        UNSHARDED_PARAM (str): Memory usage of unsharded parameters.
        SHARDED_GRAD (str): Memory usage of sharded gradients corresponding to the sharded parameters.
        UNSHARDED_GRAD (str): Memory usage of unsharded gradients corresponding to the unsharded parameters.
        ACT (str): Memory usage of activations and tensors from forward and AC recomputation.
        TEMP (str): Memory usage of temporary tensors during the backward pass including gradients of activations.
        ALL_GATHER (str): Memory usage of all_gather output tensor.
        REDUCE_SCATTER (str): Memory usage of reduce_scatter input tensor.
        OPT (str): Memory usage of tensors storing optimizer states.
        INP (str): Memory usage of input tensors.
    """

    SHARDED_PARAM = "Sharded Param"
    UNSHARDED_PARAM = "Unsharded Param"
    BUFFER = "Buffer"
    SHARDED_GRAD = "Sharded Grad"
    UNSHARDED_GRAD = "Unsharded Grad"
    ACT = "Activation"
    TEMP = "Temp"
    ALL_GATHER = "All Gather"
    REDUCE_SCATTER = "Reduce Scatter"
    OPT = "OptState"
    INP = "Inputs"


class _SavedFSDPMethods(NamedTuple):
    pre_backward: Callable
    post_backward: Callable


class _FSDPModState(_State):
    """
    Enumerates the states of FSDP modules during the forward and backward passes.
    """

    BEF_PRE_FW = "Before Pre-Forward"
    AFT_PRE_FW = "After Pre-Forward"
    BEF_POST_FW = "Before Post-Forward"
    AFT_POST_FW = "After Post-Forward"
    BEF_PRE_BW = "Before Pre-Backward"
    AFT_PRE_BW = "After Pre-Backward"
    BEF_POST_BW = "Before Post-Backward"
    AFT_POST_BW = "After Post-Backward"
    PRE_FW_AC = "Pre-Forward AC"
    POST_FW_AC = "Post-Forward AC"
    PEAK_FW = "Peak Forward"
    PEAK_BW = "Peak Backward"


class _FSDPModMemStats:
    """
    A class to store the memory statistics of an FSDP module.

    Args:
        mod_fqn (str): The fully qualified name of the FSDP module.

    Attributes:
        snapshots (Dict[_FSDPModState, Dict[torch.device, Dict[str, int]]]): A dictionary of memory snapshots
        of the module at different states as defined by ``_FSDPModState``. Each key is a device, and
        each value is another dictionary with keys as memory reference types defined by ``_FSDPRefType`` and
        values as the memory consumed in bytes.

    """

    def __init__(self, mod_fqn: str) -> None:
        self.mod_fqn = mod_fqn
        self.local_peak: dict[torch.device, int] = {}
        self.snapshots: dict[
            _FSDPModState, list[dict[torch.device, dict[str, int]]]
        ] = {}


class _FSDPState(Enum):
    PRE_FW = auto()
    FW = auto()
    POST_FW = auto()
    PRE_BW = auto()
    BW = auto()
    POST_BW = auto()


class FSDPMemTracker(MemTracker):
    """
    A ``TorchDispatchMode`` based context manager that extends ``torch.distributed._tools.mem_tracker.MemTracker`` to track
    and categorize the peak memory and module-wise memory usage of FSDP modules.

    It tracks the peak memory usage across all the devices of all the FSDP modules in the module tree and categorizes
    the tensor memory usage as defined by ``_FSDPRefType``. Further, it captures memory `snapshots` at different stages of
    the module execution defined by ``_FSDPModState``.

    Attributes:
        memory_tracking: A weakref key dictionary to store the memory statistics of each module. Each key is a reference
            to a module, and each value is a ``_FSDPModMemStats`` object that stores the memory statistics of the module.

    Args:
        mod (torch.nn.Module): The root FSDP module to be tracked.
        optm (torch.optim.Optimizer, optional): The optimizer to be tracked.

    Note: Please refer to ``torch.distributed._tools.mem_tracker.MemTracker`` to learn about the limitations.

    Example usage

    .. code-block:: python

        module = ...
        optimizer = ...
        inp = ...
        fmt = FSDPMemTracker(module, optimizer)
        fmt.track_inputs((inp,))
        with fmt:
            optimizer.zero_grad()
            loss = module(inp)
            print("After Forward:")
            fmt.display_snapshot("current")
            loss.backward()
            optimizer.step()
        fmt.display_snapshot("peak")
        fmt.display_modulewise_snapshots(depth=3, units="MB")

    """

    def __init__(
        self,
        mod: torch.nn.Module,
        optm: Optional[torch.optim.Optimizer] = None,
    ) -> None:
        super().__init__()
        assert isinstance(mod, FSDPModule), "FSDPMemTracker only supports FSDP modules"
        self._root_mod = mod
        self._optm = optm
        self._fsdp_mod_to_saved_methods: WeakIdKeyDictionary = WeakIdKeyDictionary()
        self._fsdp_state: _FSDPState = _FSDPState.PRE_FW
        self._ref_class: type[_RefType] = _FSDPRefType

    def _instrument_fsdp_sharded_params_grads(
        self, fsdp_param_group: FSDPParamGroup
    ) -> None:
        # Track sharded params and grads after initialization
        for fsdp_param in fsdp_param_group.fsdp_params:
            self._update_and_maybe_create_winfos(
                fsdp_param.sharded_param,
                _FSDPRefType.SHARDED_PARAM,
            )
            sharded_grad = fsdp_param.sharded_param.grad
            if sharded_grad is not None:
                self._update_and_maybe_create_winfos(
                    sharded_grad,
                    _FSDPRefType.SHARDED_GRAD,
                )

    def _fsdp_state_pre_forward(
        self,
        fsdp_mod: FSDPModule,
        orig_fsdp_state_pre_fw: Callable[_P, tuple[tuple[Unpack[_Ts]], dict[str, Any]]],
    ) -> Callable[_P, tuple[tuple[Unpack[_Ts]], dict[str, Any]]]:
        # We capture memory snapshots before and after ``FSDPState._pre_forward`` to attribute the `unsharded` params
        # and `all_gather` buffers. There are three cases:
        # Case 1: If the module is not in the ``memory_tracking`` dictionary, create a new ``_FSDPModMemStats``
        #         instance for the module and add it to the ``memory_tracking`` dictionary.
        # Case 2: If the module is already in the ``memory_tracking`` dictionary and we are in backward, this means
        #         we are in the AC region. We check if this is the top most module in the AC region. If it is,
        #         we store a weak reference and set the flag ``_in_ac`` to True.
        # Case 3: If the module is already in the ``memory_tracking`` dictionary and we are in forward, this means
        #         this module is called for the second time. If it is a root module, that means we are in the next
        #         iteration and we error out. If it is not a root module, that means it's a submodule that is being
        #         used multiple times in the same iteration, which we allow and track.
        # For Case 1 and 3, we also initialize the ``local_peak`` and ``PEAK_FW`` snapshot for the module.
        # For Case 2 we only capture 1 snapshot after ``FSDPState._pre_forward`` runs because it is a no-op.
        @wraps(orig_fsdp_state_pre_fw)
        def inner(
            *args: _P.args, **kwargs: _P.kwargs
        ) -> tuple[tuple[Unpack[_Ts]], dict[str, Any]]:
            self._fsdp_state = _FSDPState.PRE_FW
            mod_fqn = self._mod_tracker.get_known_fqn(fsdp_mod)
            assert mod_fqn is not None
            if fsdp_mod not in self.memory_tracking:
                mod_stat = _FSDPModMemStats(mod_fqn)
                self.memory_tracking[fsdp_mod] = mod_stat
                snapshot = self.get_tracker_snapshot()
                mod_stat.local_peak = {
                    dev: dev_snap[_TOTAL_KEY] for dev, dev_snap in snapshot.items()
                }
                mod_stat.snapshots.setdefault(_FSDPModState.PEAK_FW, []).append(
                    snapshot
                )
                mod_stat.snapshots.setdefault(_FSDPModState.BEF_PRE_FW, []).append(
                    deepcopy(snapshot)
                )
            elif not self._mod_tracker.is_bw:
                parents = self._mod_tracker.parents - {mod_fqn}
                if len(parents) == 1 and "Global" in parents:
                    raise NotImplementedError(
                        "FSDPMemTracker does not support memory tracking for multiple iterative calls."
                        " Either use ``reset_mod_stats`` to clear module memory stats for the previous iteration"
                        " or file a github issue if you need this feature."
                    )

            args, kwargs = orig_fsdp_state_pre_fw(*args, **kwargs)

            fsdp_state = fsdp_mod._get_fsdp_state()
            if fsdp_param_group := fsdp_state._fsdp_param_group:
                for fsdp_param in fsdp_param_group.fsdp_params:
                    self._update_and_maybe_create_winfos(
                        fsdp_param.unsharded_param,
                        _FSDPRefType.UNSHARDED_PARAM,
                    )
            mod_stat = self.memory_tracking[fsdp_mod]
            if self._mod_tracker.is_bw:
                state = _FSDPModState.PRE_FW_AC
                if self._ac_mod is None:
                    self._ac_mod = weakref.ref(fsdp_mod)
                    self._in_ac = True
            else:
                state = _FSDPModState.AFT_PRE_FW
            mod_stat.snapshots.setdefault(state, []).append(self.get_tracker_snapshot())
            self._fsdp_state = _FSDPState.FW
            return args, kwargs

        return inner

    def _fsdp_state_post_forward(
        self,
        fsdp_mod: FSDPModule,
        orig_fsdp_state_post_fw: Callable[_P, _R],
    ) -> Callable[_P, _R]:
        # We capture memory snapshots before and after ``FSDPState._post_forward`` to capture the resharded state
        # if ``reshard_after_forward`` is not ``False``. There are two cases:
        # Case 1: This is called in backward, which means we are in the AC region. If this is the top most module
        #         in the AC region, we set the flag ``_in_ac`` to False.
        # Case 2: This is called in forward.
        @wraps(orig_fsdp_state_post_fw)
        def inner(*args: _P.args, **kwargs: _P.kwargs) -> _R:
            mod_stat = self.memory_tracking[fsdp_mod]
            if self._mod_tracker.is_bw:
                state = _FSDPModState.POST_FW_AC
                if self._ac_mod is not None and self._ac_mod() is fsdp_mod:
                    self._ac_mod = None
                    self._in_ac = False
            else:
                state = _FSDPModState.BEF_POST_FW
            mod_stat.snapshots.setdefault(state, []).append(self.get_tracker_snapshot())
            self._fsdp_state = _FSDPState.POST_FW

            output = orig_fsdp_state_post_fw(*args, **kwargs)

            if not self._mod_tracker.is_bw:
                mod_stat.snapshots.setdefault(_FSDPModState.AFT_POST_FW, []).append(
                    self.get_tracker_snapshot()
                )
            return output

        return inner

    def _fsdp_param_group_pre_backward(
        self,
        fsdp_mod: FSDPModule,
        orig_fsdp_param_group_pre_backward: Callable[_P, Any],
    ) -> Callable[_P, None]:
        # We capture memory snapshots before and after ``FSDPParamGroup.pre_backward`` to capture the pre-fetching
        # and unsharding of params. We also initialize ``local_peak`` and ``PEAK_BW`` snapshot for the module.
        @wraps(orig_fsdp_param_group_pre_backward)
        def inner(*args: _P.args, **kwargs: _P.kwargs) -> None:
            self._fsdp_state = _FSDPState.PRE_BW
            mod_stat = self.memory_tracking[fsdp_mod]
            snapshot = self.get_tracker_snapshot()
            mod_stat.local_peak = {
                dev: dev_snap[_TOTAL_KEY] for dev, dev_snap in snapshot.items()
            }
            mod_stat.snapshots.setdefault(_FSDPModState.PEAK_BW, []).append(snapshot)
            mod_stat.snapshots.setdefault(_FSDPModState.BEF_PRE_BW, []).append(
                deepcopy(snapshot)
            )
            orig_fsdp_param_group_pre_backward(*args, **kwargs)

            mod_stat.snapshots.setdefault(_FSDPModState.AFT_PRE_BW, []).append(
                self.get_tracker_snapshot()
            )
            self._fsdp_state = _FSDPState.BW

        return inner

    def _fsdp_param_group_post_backward(
        self,
        fsdp_mod: FSDPModule,
        orig_fsdp_param_group_post_backward: Callable[_P, Any],
    ) -> Callable[_P, None]:
        # We capture the memory snapshots before and after ``FSDPParamGroup.post_backward`` to track and attribute
        # the `unsharded` grads before the post backward and then `sharded` grads and `reduce_scatter` buffers
        # after the post backward.
        @wraps(orig_fsdp_param_group_post_backward)
        def inner(*args: _P.args, **kwargs: _P.kwargs) -> None:
            fsdp_state = fsdp_mod._get_fsdp_state()
            if fsdp_param_group := fsdp_state._fsdp_param_group:
                for fsdp_param in fsdp_param_group.fsdp_params:
                    unsharded_grad = fsdp_param._unsharded_param.grad
                    if unsharded_grad is not None:
                        self._update_and_maybe_create_winfos(
                            unsharded_grad,
                            _FSDPRefType.UNSHARDED_GRAD,
                            update_existing=True,
                        )

            mod_stat = self.memory_tracking[fsdp_mod]
            mod_stat.snapshots.setdefault(_FSDPModState.BEF_POST_BW, []).append(
                self.get_tracker_snapshot()
            )
            self._fsdp_state = _FSDPState.POST_BW
            orig_fsdp_param_group_post_backward(*args, **kwargs)

            if fsdp_param_group := fsdp_state._fsdp_param_group:
                for fsdp_param in fsdp_param_group.fsdp_params:
                    sharded_grad = fsdp_param.sharded_param.grad
                    if sharded_grad is not None:
                        self._update_and_maybe_create_winfos(
                            sharded_grad,
                            _FSDPRefType.SHARDED_GRAD,
                        )

            mod_stat.snapshots.setdefault(_FSDPModState.AFT_POST_BW, []).append(
                self.get_tracker_snapshot()
            )

        return inner

    def _instrument_fsdp_module(self) -> None:
        # We uninstall the existing `FSDPState._pre_forward` and `FSDPState._post_forward` hooks and install
        # our own hooks that wrap them. We choose this over monkey-patching `FSDPParamGroup.pre_forward` and
        # `FSDPParamGroup.post_forward` because during AC these won't be called.
        # TODO(@sanketpurandare): This will need to be modified after this PR (https://github.com/pytorch/pytorch/pull/127786)
        # lands. For backward we monkey-patch the `FSDPParamGroup.pre_backward` and `FSDPParamGroup.post_backward`.
        for module in self._root_mod.modules():
            if isinstance(module, FSDPModule):
                fsdp_state = module._get_fsdp_state()
                if fsdp_param_group := fsdp_state._fsdp_param_group:
                    self._instrument_fsdp_sharded_params_grads(fsdp_param_group)
                    fsdp_state._pre_forward_hook_handle.remove()
                    fsdp_state._post_forward_hook_handle.remove()
                    fsdp_state._pre_forward_hook_handle = (
                        module.register_forward_pre_hook(
                            self._fsdp_state_pre_forward(
                                module, fsdp_state._pre_forward
                            ),
                            prepend=True,
                            with_kwargs=True,
                        )
                    )
                    fsdp_state._post_forward_hook_handle = module.register_forward_hook(
                        self._fsdp_state_post_forward(module, fsdp_state._post_forward),
                        prepend=False,
                        always_call=True,
                    )
                    self._fsdp_mod_to_saved_methods[module] = _SavedFSDPMethods(
                        fsdp_param_group.pre_backward,
                        fsdp_param_group.post_backward,
                    )
                    fsdp_param_group.pre_backward = self._fsdp_param_group_pre_backward(  # type: ignore[assignment]
                        module, fsdp_param_group.pre_backward
                    )
                    fsdp_param_group.post_backward = (  # type: ignore[assignment]
                        self._fsdp_param_group_post_backward(
                            module, fsdp_param_group.post_backward
                        )
                    )

        for buffer in self._root_mod.buffers():
            self._update_and_maybe_create_winfos(
                buffer,
                _FSDPRefType.BUFFER,
            )

    def _instrument_optimizer(self) -> None:
        # Register a hook on the optimizer step to track the optimizer states.
        # The pre-hook is to set the flag ``_in_opt`` to True. The post-hook unsets the flag,
        # and also tracks any optimizer states that are created during the optimizer step.
        if self._optm is not None:
            self._track_optimizer_states(_FSDPRefType.OPT, self._optm)

            def _opt_step_pre_hook(
                optimizer: optim.Optimizer, args: Any, kwargs: Any
            ) -> None:
                self._in_opt = True

            def _opt_step_post_hook(
                optimizer: optim.Optimizer, args: Any, kwargs: Any
            ) -> None:
                self._track_optimizer_states(_FSDPRefType.OPT, optimizer)
                self._in_opt = False

            self._optimizer_hook_handles = (
                self._optm.register_step_pre_hook(_opt_step_pre_hook),
                self._optm.register_step_post_hook(_opt_step_post_hook),
            )

    def _register_module_and_optimizer_hooks(self) -> None:
        self._instrument_fsdp_module()
        self._instrument_optimizer()

    def _deregister_module_and_optimizer_hooks(self) -> None:
        for (
            fsdp_mod,
            saved_methods,
        ) in self._fsdp_mod_to_saved_methods.items():
            fsdp_state = fsdp_mod._get_fsdp_state()
            fsdp_state._pre_forward_hook_handle.remove()
            fsdp_state._post_forward_hook_handle.remove()
            fsdp_state._pre_forward_hook_handle = fsdp_mod.register_forward_pre_hook(
                fsdp_state._pre_forward, prepend=True, with_kwargs=True
            )
            fsdp_state._post_forward_hook_handle = fsdp_mod.register_forward_hook(
                fsdp_state._post_forward, prepend=False
            )
            if fsdp_param_group := fsdp_state._fsdp_param_group:
                fsdp_param_group.pre_backward = saved_methods.pre_backward
                fsdp_param_group.post_backward = saved_methods.post_backward
        self._fsdp_mod_to_saved_methods.clear()

        if self._optimizer_hook_handles is not None:
            for handle in self._optimizer_hook_handles:
                handle.remove()
            self._optimizer_hook_handles = None

    def track_inputs(self, inputs: tuple[Any, ...]) -> None:
        """
        This is used to track the input tensors to the model and annotate them as ``Inputs``.
        Args:
            inputs (Tuple[Any]): A tuple containing the input data. This can include tensors
                        as well as other data types. Only tensors will be tracked.
        """

        def _track_inputs(t: torch.Tensor) -> None:
            self._update_and_maybe_create_winfos(
                t,
                _FSDPRefType.INP,
            )

        tree_map_only(torch.Tensor, _track_inputs, inputs)

    def track_external(
        self, *external: Union[nn.Module, optim.Optimizer, torch.Tensor]
    ) -> None:
        """This is a no-op for ``FSDPMemTracker``"""

    def __enter__(self) -> "FSDPMemTracker":
        if self._depth == 0:
            self._register_module_and_optimizer_hooks()
            self._track_resize()
            self._track_dtensor_dispatch()
            self._peak_mem_snap = self.get_tracker_snapshot()
            self._peak_mem = {
                dev: dev_snap[_TOTAL_KEY]
                for dev, dev_snap in self._peak_mem_snap.items()
            }
            self._mod_tracker.__enter__()
        TorchDispatchMode.__enter__(self)
        self._depth += 1
        return self

    def __exit__(self, *args: Any) -> None:
        self._depth -= 1
        if self._depth == 0:
            self._deregister_module_and_optimizer_hooks()
            self._restore_resize()
            self._restore_dtensor_dispatch()
            self._mod_tracker.__exit__(*args)
        TorchDispatchMode.__exit__(self, *args)

    def __torch_dispatch__(self, func, types, args=..., kwargs=None):  # type: ignore[no-untyped-def]
        if (
            func == torch.ops._c10d_functional.wait_tensor.default
            and active_fake_mode()
        ):
            # N.B: This is a hacky way to override the Meta IMPL of wait_tensor. The original impl returns
            # a new tensor which does not happen in eager mode, when a wait_tensor is called.
            res = args[0]
        else:
            res = func(*args, **kwargs or {})
        # If we are tracking an optimizer state, we use the optimizer reference type.
        # If we are in backward region and not in AC region, we use the backward reference type.
        # Else we use the forward reference type.
        if self._in_opt:
            reftype = _FSDPRefType.OPT
        elif self._mod_tracker.is_bw and not self._in_ac:
            reftype = _FSDPRefType.TEMP
        else:
            reftype = _FSDPRefType.ACT
        if func == c10d._allgather_base_.default and self._fsdp_state in [
            _FSDPState.PRE_FW,
            _FSDPState.PRE_BW,
        ]:
            output_tensor = args[0]
            self._update_and_maybe_create_winfos(
                output_tensor,
                _FSDPRefType.ALL_GATHER,
                update_existing=True,
            )
        if (
            func == c10d._reduce_scatter_base_.default
            and self._fsdp_state == _FSDPState.POST_BW
        ):
            input_tensor = args[1]
            self._update_and_maybe_create_winfos(
                input_tensor,
                _FSDPRefType.REDUCE_SCATTER,
                update_existing=True,
            )

        tree_map_only(torch.Tensor, partial(self._track, reftype), res)
        peak_state = (
            _FSDPModState.PEAK_BW if self._mod_tracker.is_bw else _FSDPModState.PEAK_FW
        )
        self._update_peak_stats(peak_state)
        return res
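# Illustrative sketch (not part of the commit): because fsdp2_mem_tracker imports
# fake_collectives, FSDPMemTracker can also be run under FakeTensorMode to estimate
# per-module memory without allocating real GPU memory. The factory below is a
# hypothetical user-provided function that is assumed to build the model with
# fully_shard over an initialized (possibly fake) process group and return
# (model, optimizer, input).
import torch
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.distributed._tools import FSDPMemTracker

def estimate_fsdp_memory(build_model_optim_inp):
    with FakeTensorMode():
        model, optimizer, inp = build_model_optim_inp()  # hypothetical factory
        fmt = FSDPMemTracker(model, optimizer)
        fmt.track_inputs((inp,))
        with fmt:
            optimizer.zero_grad()
            loss = model(inp).sum()
            loss.backward()
            optimizer.step()
        fmt.display_snapshot("peak")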
venv/Lib/site-packages/torch/distributed/_tools/ilp_utils.py (new file, 292 lines added)
@@ -0,0 +1,292 @@
import copy
from collections import OrderedDict
from typing import cast, TypedDict

import numpy as np

import torch
from torch.distributed._tools.mem_tracker import (
    _MemRefType,
    _ModMemStats,
    _ModState,
    MemTracker,
)
from torch.distributed._tools.runtime_estimator import RuntimeEstimator
from torch.distributed._tools.sac_estimator import SACEstimator, SACTradeOffStats


class ModOrder(TypedDict):
    fw_pre_order: list[str]
    bw_pre_order: list[str]
    fw_post_order: list[str]
    bw_post_order: list[str]


class ModRuntime(TypedDict):
    fw: float
    bw: float


class ModStats(TypedDict):
    fqn: str
    # per-module params
    param_per_module: int
    # per-module grads
    grad_per_module: int
    # total accumulated gradients up to and including this module
    grad_total: int
    # per module fw activation size (excluding input and output)
    act_fw_per_module: int
    # per module bw activation size during peak_bw
    act_bw_per_module: int
    # per module activation grad size during peak_bw
    act_grad_per_module: int
    # total activation size up to but excluding the current module
    # includes input of the current module (i.e., output of previous module)
    act_total: int
    # Inputs to the module
    input_per_module: int
    # Outputs of the module
    output_per_module: int
    # Total fw run-time of the module
    fw_runtime_per_module: float
    # Total bw run-time of the module
    bw_runtime_per_module: float
    # Is this module a leaf module
    is_leaf: bool
    # Total ac run-time of the module
    sac_runtime: float
    # Total ac_memory for the module
    sac_memory: int
    # Number of piecewise-linear functions used for approximating ac tradeoff curve
    n_segments: int
    # Slopes of the piecewise-linear functions
    slopes: list[float]
    # Intercepts of the piecewise-linear functions
    intercepts: list[float]
    # X breakpoints of the piecewise-linear functions
    breakpoints: list[float]
    # Original trade-off curves
    tradeoff_curve: OrderedDict[float, float]


class ModuleInfo(TypedDict):
    mod_order: ModOrder
    mod_stats: list[ModStats]


def aggregate_stats(
    model: torch.nn.Module,
    mem_tracker: MemTracker,
    runtime_estimator: RuntimeEstimator,
    sac_estimator: SACEstimator,
    dev: torch.device,
) -> ModuleInfo:
    """
    Collect modulewise stats for a given model, including memory, runtime, and AC tradeoff stats.

    Args:
        model: nn.Module object
        runtime_estimator: RuntimeEstimator object with runtime stats
        mem_tracker: MemTracker object with memory stats
        sac_estimator: SACEstimator object with AC tradeoff stats
        dev: device the model was run on (used to extract memory stats from MemTracker)

    Returns:
        ModuleInfo: A dictionary with module order and module stats.
    """

    # Memory stats
    mod_mem_stats: dict[torch.nn.Module, _ModMemStats] = dict(
        copy.deepcopy(mem_tracker.memory_tracking)
    )

    # Runtime stats
    mod_runtime_stats: dict[str, ModRuntime] = {
        fqn: {"fw": v["fw"], "bw": v["bw"]}
        for fqn, v in runtime_estimator.mod_runtimes.items()
    }

    # Module order
    mod_order: ModOrder = {
        "fw_pre_order": list(runtime_estimator.mod_fw_pre_order),
        "bw_pre_order": list(runtime_estimator.mod_bw_pre_order),
        "fw_post_order": list(runtime_estimator.mod_fw_post_order),
        "bw_post_order": list(runtime_estimator.mod_bw_post_order),
    }

    # Selective Activation Checkpointing stats
    sac_estimator.pwlf_sac_tradeoff_curve()
    mod_sac_tradeoff_stats: dict[str, SACTradeOffStats] = copy.deepcopy(
        sac_estimator.sac_mod_tradeoff_stats
    )

    module_info: ModuleInfo = {
        "mod_order": mod_order,
        "mod_stats": [],
    }

    for mod in model.modules():
        if mod_mem_stat := mod_mem_stats.get(mod, None):
            if tradeoff_stats := mod_sac_tradeoff_stats.get(mod_mem_stat.mod_fqn, None):
                sac_runtime = tradeoff_stats.sac_runtime
                sac_memory = tradeoff_stats.sac_memory
                n_segments = tradeoff_stats.n_segments
                slopes = tradeoff_stats.slopes
                intercepts = tradeoff_stats.intercepts
                breakpoints = tradeoff_stats.fit_breaks
                tradeoff_curve = tradeoff_stats.tradeoff_curve
                is_leaf = False
            else:
                sac_runtime = sac_memory = n_segments = 0
                slopes = intercepts = breakpoints = []
                tradeoff_curve: OrderedDict[float, float] = OrderedDict()  # type: ignore[no-redef]
                is_leaf = True
            mod_stat: ModStats = {
                "fqn": mod_mem_stat.mod_fqn,
                "param_per_module": mod_mem_stat.parameter_mem,
                "grad_per_module": mod_mem_stat.parameter_mem,
                "grad_total": mod_mem_stat.snapshots[_ModState.PRE_BW][-1][dev][
                    _MemRefType.GRAD
                ],
                "act_fw_per_module": max(
                    0,
                    mod_mem_stat.snapshots[_ModState.POST_FW][-1][dev][_MemRefType.ACT]
                    - mod_mem_stat.snapshots[_ModState.PRE_FW][-1][dev][_MemRefType.ACT]
                    - mod_mem_stat.output_mem,
                ),
                "act_bw_per_module": max(
                    0,
                    mod_mem_stat.snapshots[_ModState.PEAK_BW][-1][dev][_MemRefType.ACT],
                ),
                "act_grad_per_module": (
                    mod_mem_stat.snapshots[_ModState.PEAK_BW][-1][dev][_MemRefType.TEMP]
                    - mod_mem_stat.snapshots[_ModState.PRE_BW][-1][dev][
                        _MemRefType.TEMP
                    ]
                ),
                "act_total": mod_mem_stat.snapshots[_ModState.POST_FW][-1][dev][
                    _MemRefType.ACT
                ],
                "input_per_module": mod_mem_stat.input_mem,
                "output_per_module": mod_mem_stat.output_mem,
                "fw_runtime_per_module": mod_runtime_stats[mod_mem_stat.mod_fqn]["fw"],
                "bw_runtime_per_module": mod_runtime_stats[mod_mem_stat.mod_fqn]["bw"],
                "is_leaf": is_leaf,
                "sac_runtime": sac_runtime,
                "sac_memory": sac_memory,
                "n_segments": n_segments,
                "slopes": slopes,
                "intercepts": intercepts,
                "breakpoints": breakpoints,
                "tradeoff_curve": tradeoff_curve,
            }
            module_info["mod_stats"].append(mod_stat)

    return module_info


class Node(ModStats):
    index: int  # index according to forward pre-order
    pos_fw_post_order: int  # index according to forward post-order


class Graph:
    def __init__(self, n: int) -> None:
        self.nodes: list[Node] = []
        self.name2node: dict[str, Node] = {}
        self.ad_matrix = np.zeros((n, n))
        self.fw_post_order: list[str] = []

    def add_node(self, node: Node) -> None:
        self.nodes.append(node)
        self.name2node[node["fqn"]] = node


def parse_module_info(module_info: ModuleInfo) -> Graph:
    """
    Parse module info and create a graph (tree) of modules. The graph will be
    used by MILP solver to find optimal SAC and/or FSDP configurations.
    """
    mod_stats = module_info["mod_stats"]
    fw_pre_order = module_info["mod_order"]["fw_pre_order"]
    # assertion and number of nodes
    assert len(mod_stats) == len(fw_pre_order)
    n_nodes = len(mod_stats)

    # create graph
    g = Graph(n_nodes)
    g.fw_post_order = module_info["mod_order"]["fw_post_order"]

    # sort the modules by pre-order and add them to the graph
    module_info["mod_stats"] = sorted(
        mod_stats, key=lambda x: fw_pre_order.index(x["fqn"])
    )
    for i, one_mod_stats in enumerate(mod_stats):
        node: Node = cast(Node, one_mod_stats)
        node["index"] = i
        node["pos_fw_post_order"] = g.fw_post_order.index(node["fqn"])
        g.add_node(node)

    # set up ancestor-descendant matrix
    for i in range(n_nodes):
        for j in range(i, n_nodes):
            if is_self_or_submodule(g.nodes[j]["fqn"], g.nodes[i]["fqn"]):
                g.ad_matrix[i][j] = 1
            else:
                break

    return g


def is_self_or_submodule(name_descendant: str, name_ancestor: str) -> bool:
    """
    check if name_descendant is a submodule of name_ancestor, or if they are the same
    """
    return name_descendant == name_ancestor or name_ancestor + "." in name_descendant


def is_submodule(name_descendant: str, name_ancestor: str) -> bool:
    """
    check if name_descendant is a submodule of name_ancestor, but not the same
    """
    return name_ancestor + "." in name_descendant


def display_bytes(b: int, unit: str = "MiB") -> str:
    """
    return a string that represents the number of bytes in a desired unit
    """
    if unit == "KiB":
        return f"{b / 2**10:.2f} KiB"
    if unit == "MiB":
        return f"{b / 2**20:.2f} MiB"
    if unit == "GiB":
        return f"{b / 2**30:.2f} GiB"
    return f"{b:.2f} bytes"


def get_peak_memory_runtime_baseline(graph: Graph) -> tuple[int, float]:
    """
    Get the baseline peak memory and runtime.
    Baseline here means there is no FSDP or AC.
    Memory includes the parameters, gradients, activations, and activation gradients.
    Memory does not include e.g., optimizer states, embedding tables, etc.

    Returns:
        int: peak memory in bytes
        float: compute time in ms
    """
    P_1 = graph.nodes[0]["param_per_module"]
    num_nodes = len(graph.nodes)
    peak_mem = 0
    for i in range(num_nodes):
        TG_i = graph.nodes[i]["grad_total"]
        AG_i = graph.nodes[i]["act_grad_per_module"]
        TA_i = graph.nodes[i]["act_total"]
        peak_mem = max(peak_mem, P_1 + TG_i + AG_i + TA_i)
    compute_time = (
        graph.nodes[0]["fw_runtime_per_module"]
        + graph.nodes[0]["bw_runtime_per_module"]
    )
    return (peak_mem, compute_time)
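# Illustrative sketch (not part of the commit): a typical flow for the helpers above is
# to run MemTracker / RuntimeEstimator / SACEstimator over a model first, then aggregate
# the stats, build the module graph, and query the no-FSDP/no-AC baseline. The tracker
# objects are assumed to have already been populated; all names are placeholders.
from torch.distributed._tools.ilp_utils import (
    aggregate_stats,
    display_bytes,
    get_peak_memory_runtime_baseline,
    parse_module_info,
)

def summarize(model, mem_tracker, runtime_estimator, sac_estimator, dev):
    info = aggregate_stats(model, mem_tracker, runtime_estimator, sac_estimator, dev)
    graph = parse_module_info(info)
    peak_mem, compute_ms = get_peak_memory_runtime_baseline(graph)
    print(f"baseline peak: {display_bytes(peak_mem, 'GiB')}, compute: {compute_ms:.1f} ms")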
venv/Lib/site-packages/torch/distributed/_tools/mem_tracker.py (new file, 949 lines added)
@@ -0,0 +1,949 @@
import math
import os
import re
import warnings
from contextlib import nullcontext
from copy import deepcopy
from enum import auto, Enum
from functools import partial, wraps
from typing import Any, Callable, Optional, TYPE_CHECKING, Union
from typing_extensions import Self

import torch
import torch.distributed._tools.fake_collectives
from torch import nn, optim
from torch._guards import active_fake_mode
from torch.distributed._tools.common_utils import get_untyped_storages
from torch.distributed._tools.mod_tracker import ModTracker
from torch.distributed.tensor import DTensor
from torch.optim.optimizer import (
    register_optimizer_step_post_hook,
    register_optimizer_step_pre_hook,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_flatten, tree_map_only
from torch.utils.weak import WeakIdKeyDictionary, weakref


if TYPE_CHECKING:
    from torch.utils.hooks import RemovableHandle

# This value is hard-coded here:
# https://github.com/pytorch/pytorch/blob/5fba5d83f0703ff8077ab65448a998e9ad6598fd/c10/cuda/CUDACachingAllocator.cpp#L117
_PYTORCH_MIN_ALLOCATE = (
    2**9 if int(os.environ.get("PYTORCH_NO_CUDA_MEMORY_CACHING", 0)) == 0 else 1
)
_TOTAL_KEY = "Total"

__all__ = ["MemTracker"]


class _RefType(str, Enum):
    """Base Class for defining memory reference types, categorizing tensors based on their usage within a model."""


class _State(str, Enum):
    """Base Class for defining module state to capture snapshots."""


class _MemRefType(_RefType):
    """
    An enum to define memory reference types, categorizing tensors based on their usage within a model.

    - PARAM: Tensors registered as nn.Parameter within modules.
    - BUFFER: Tensors registered as nn.Buffer within modules.
    - GRAD: Gradients associated with parameters.
    - ACT: Tensors produced during the forward pass and recomputation in activation checkpointing.
    - TMP: Temporary memory used during the backward pass, including gradients of activations.
    - OPT: Tensors holding optimizer states.
    - OTH: Tensors registered via `track_external` that do not fit the above categories.
    """

    PARAM = "Parameter"
    BUFFER = "Buffer"
    GRAD = "Gradient"
    ACT = "Activation"
    TEMP = "Temp"
    OPT = "Optstate"
    OTH = "Other"


class _ModState(_State):
    """
    An enum to define the state of a module.

    - PRE_FW: The module is about to run the forward pass.
    - POST_FW: The module has finished running the forward pass.
    - PEAK_FW: The module has reached the peak memory usage during the forward pass.
    - PRE_BW: The module is about to run the backward pass.
    - PRE_FW_AC: The module is about to run the forward pass with activation checkpointing.
    - POST_FW_AC: The module has finished running the forward pass with activation checkpointing.
    - POST_BW: The module has finished running the backward pass.
    - PEAK_BW: The module has reached the peak memory usage during the backward pass.
    """

    PRE_FW = "Pre-Forward"
    POST_FW = "Post-Forward"
    PEAK_FW = "Peak-Forward"
    PRE_BW = "Pre-Backward"
    PRE_FW_AC = "Pre-Forward-AC"
    POST_FW_AC = "Post-Forward-AC"
    POST_BW = "Post-Backward"
    PEAK_BW = "Peak-Backward"


class _ModMemStats:
    """
    A class to store the memory statistics of a module.

    Args:
        mod_fqn (str): The fully qualified name of the module.
    Attributes:
        mod_fqn (str): The fully qualified name of the module.
        parameter_mem (int): The memory usage of the parameters of the module.
        buffer_mem (int): The memory usage of the buffers of the module.
        input_mem (int): The memory usage of the inputs to the module.
        output_mem (int): The memory usage of the outputs from the module.
        snapshots (Dict[_ModState, Dict[torch.device, Dict[str, int]]]): A dictionary of memory snapshots
        of the module at different states defined by ``_ModState``.
    Note:
        The memory snapshot is stored as a dictionary - Dict[torch.device, Dict[str, int]], where each key is a device,
        and each value is another dictionary with keys as memory reference types defined by `_MemRefType` and
        values as the memory consumed in bytes.
    """

    def __init__(self, mod_fqn: str):
        self.mod_fqn = mod_fqn
        self.parameter_mem: int
        self.buffer_mem: int
        self.input_mem: int
        self.output_mem: int
        self.local_peak: dict[torch.device, int] = {}
        self.snapshots: dict[_ModState, list[dict[torch.device, dict[str, int]]]] = {}


class _WeakRefInfo:
    """
    Manages memory statistics and device attributes for tensor storages.
    """

    def __init__(
        self, size: int, element_size: int, device: torch.device, reftype: _RefType
    ) -> None:
        """
        Initializes the ``_WeakRefInfo`` object with tensor storage properties.

        Args:
            size (int): The number of elements in the tensor storage.
            element_size (int): The size of each element in the tensor storage.
            device (torch.device): The device on which the tensor is allocated.
            reftype (_RefType): The reference type of the tensor.
        """
        self.size = size
        self.element_size = element_size
        self.reftype = reftype
        self.device = device
        self.mem_consumed = self._calculate_mem_consumed()

    def _calculate_mem_consumed(self) -> int:
        """
        Calculates the memory consumed by the tensor storage, considering device-specific allocation rules.

        Returns:
            int: The memory consumed in bytes.
        """
        mem = self.size * self.element_size
        if self.device.type == "cuda":
            return math.ceil((mem) / _PYTORCH_MIN_ALLOCATE) * _PYTORCH_MIN_ALLOCATE
        return mem

    def update_mem_consumed(self, st: torch.UntypedStorage) -> int:
        """
        Updates and returns the memory consumed if the storage size has changed.

        Args:
            st (torch.UntypedStorage): The tensor storage to check for size updates.

        Returns:
            int: The updated memory consumed in bytes.
        """
        if st.size() != self.size:
            self.size = st.size()
            self.mem_consumed = self._calculate_mem_consumed()
        return self.mem_consumed

    @classmethod
    def create_winfo(
        cls,
        st: torch.UntypedStorage,
        device: torch.device,
        reftype: _RefType,
        callback: Optional[Callable[[Self, weakref.ref], Any]] = None,
    ) -> tuple[Self, weakref.ref]:
        """
        Creates a new ``_WeakRefInfo`` instance and a weak reference to a ``torch.UntypedStorage`` object,
        optionally attaching a callback to the weak reference.

        Args:
            st (torch.UntypedStorage): The storage object for which to create the weak reference info.
            device (torch.device): The device associated with the storage object.
            reftype (_RefType): The type of reference, used to categorize the storage.
            callback (Optional[Callable[[Self, weakref.ref]]]): A callback function that is called when
                the storage object is about to be finalized (garbage collected). The callback function
                should accept two arguments: the ``_WeakRefInfo`` instance and the weak reference to the storage.
        Returns:
            Tuple[Self, weakref.ref]: A tuple containing the newly created ``_WeakRefInfo`` instance and the
                weak reference to the storage object. The weak reference may have an attached callback if provided.
        """

        winfo = cls(st.size(), st.element_size(), device, reftype)
        w_st = weakref.ref(st, partial(callback, winfo) if callback else None)
        return winfo, w_st

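# Illustrative sketch (not part of the commit): _WeakRefInfo rounds CUDA storage sizes up
# to the caching allocator's minimum block size (_PYTORCH_MIN_ALLOCATE, 512 bytes), so a
# 10-element float32 storage is accounted as 512 bytes rather than 40. The values below
# are arbitrary and only serve to demonstrate the rounding rule.
winfo = _WeakRefInfo(10, 4, torch.device("cuda"), _MemRefType.ACT)
assert winfo.mem_consumed == 512
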
def _get_mem_divisor(units: str) -> int:
|
||||
unit_dict = {"B": 1, "KiB": 2**10, "MiB": 2**20, "GiB": 2**30}
|
||||
if units in unit_dict:
|
||||
return unit_dict[units]
|
||||
else:
|
||||
raise ValueError(
|
||||
f"Unsupported unit: {units}. Supported units are: {', '.join(unit_dict.keys())}"
|
||||
)
|
||||
|
||||
|
||||
def _rounding_fn(value: int, divisor: int, precision: int) -> Union[float, int]:
|
||||
return value if divisor == 1 else round(value / divisor, precision)
|
||||
|
||||
|
||||
def _print_snapshot(snapshot: dict[torch.device, dict[str, int]], units: str) -> None:
|
||||
if len(snapshot) == 0:
|
||||
print("No memory tracked.")
|
||||
return
|
||||
divisor = _get_mem_divisor(units)
|
||||
for dev, dev_snap in snapshot.items():
|
||||
if _rounding_fn(dev_snap[_TOTAL_KEY], divisor, 2) <= 0:
|
||||
continue
|
||||
print(
|
||||
f"Device: {dev}",
|
||||
*(
|
||||
f"\t{k.value}: {_rounding_fn(v, divisor, 2)} {units}"
|
||||
if isinstance(k, _RefType)
|
||||
else f"\t{k}: {_rounding_fn(v, divisor, 2)} {units}"
|
||||
for k, v in dev_snap.items()
|
||||
),
|
||||
sep="\n",
|
||||
)
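        # Illustrative output: a "Device: cuda:0" line followed by one indented
        # "<ref type>: <value> <units>" line per tracked category.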
|
||||
|
||||
|
||||
def _print_snapshot_tabular(
|
||||
snapshot: dict[torch.device, dict[str, int]], units: str
|
||||
) -> None:
|
||||
if len(snapshot) == 0:
|
||||
print("No memory tracked.")
|
||||
return
|
||||
try:
|
||||
from tabulate import tabulate
|
||||
except ImportError as err:
|
||||
raise ImportError(
|
||||
"Please install tabulate to use the tabulate option."
|
||||
) from err
|
||||
divisor = _get_mem_divisor(units)
|
||||
table_data = []
|
||||
key_list = list(next(iter(snapshot.values())).keys())
|
||||
headers = ["Device"] + [
|
||||
f"{key.value}" if isinstance(key, _RefType) else f"{key}" for key in key_list
|
||||
]
|
||||
|
||||
for dev, dev_snap in snapshot.items():
|
||||
if _rounding_fn(dev_snap[_TOTAL_KEY], divisor, 2) <= 0:
|
||||
continue
|
||||
row = [str(dev)]
|
||||
row.extend(f"{_rounding_fn(v, divisor, 2)} {units}" for v in dev_snap.values())
|
||||
table_data.append(row)
|
||||
print(tabulate(table_data, headers=headers, tablefmt="rst"))
|
||||
|
||||
|
||||
def _print_state_snapshots(
|
||||
snapshots: dict[_State, list[dict[torch.device, dict[str, int]]]], units: str
|
||||
) -> None:
|
||||
for state, snapshot_list in snapshots.items():
|
||||
print(f"{state.value}")
|
||||
for i, snapshot in enumerate(snapshot_list):
|
||||
print(f"# {i + 1}:")
|
||||
_print_snapshot(snapshot, units)
|
||||
print()
|
||||
|
||||
|
||||
def _print_state_snapshots_tabular(
|
||||
snapshots: dict[_State, list[dict[torch.device, dict[str, int]]]], units: str
|
||||
) -> None:
|
||||
try:
|
||||
from tabulate import tabulate
|
||||
except ImportError as err:
|
||||
raise ImportError(
|
||||
"Please install tabulate to use the tabulate option."
|
||||
) from err
|
||||
|
||||
table_data = []
|
||||
last_state_call = None
|
||||
divisor = _get_mem_divisor(units)
|
||||
for state, snapshot_list in snapshots.items():
|
||||
for i, snapshot in enumerate(snapshot_list):
|
||||
state_call = f"{state.value} # {i + 1}"
|
||||
for dev, dev_snap in snapshot.items():
|
||||
if _rounding_fn(dev_snap[_TOTAL_KEY], divisor, 2) <= 0:
|
||||
continue
|
||||
row = {
|
||||
"State & Call": (
|
||||
state_call if state_call != last_state_call else ""
|
||||
),
|
||||
"Device": str(dev),
|
||||
}
|
||||
last_state_call = state_call
|
||||
for k, v in dev_snap.items():
|
||||
row[f"{k.value}" if isinstance(k, _RefType) else f"{k}"] = (
|
||||
f"{_rounding_fn(v, divisor, 2)} {units}"
|
||||
)
|
||||
table_data.append(row)
|
||||
print(tabulate(table_data, headers="keys", tablefmt="rst"))
|
||||
|
||||
|
||||
class _UpdateType(Enum):
|
||||
# These are used for tracking updates to the continuously maintained memory snapshot.
|
||||
# ADD - When a new tensor storage is tracked
|
||||
# DEL - When a tensor storage is about to be finalized (garbage collected).
|
||||
# REF - When a tensor reference is updated, for instance, the gradients are marked as
|
||||
# generic backward reference types until the grad_hook categorizes them as gradients.
|
||||
# SIZE - When a tensor's storage is resized.
|
||||
ADD = auto()
|
||||
DEL = auto()
|
||||
REF = auto()
|
||||
SIZE = auto()
|
||||
|
||||
|
||||
class MemTracker(TorchDispatchMode):
|
||||
"""
|
||||
A TorchDispatchMode to track, categorize and attribute the tensor memory created or accessed within its context.
|
||||
|
||||
It categorizes the tracked tensors as parameters, buffers, activations, gradients, temporary memory and optimizer states
|
||||
as defined by ``_MemRefType`` within its context. It captures memory `snapshots` for the modules, called within its context,
|
||||
at various states defined by ``_ModState``.
|
||||
|
||||
Attributes:
|
||||
memory_tracking: A weakref key dictionary to store the memory statistics of each module. Each key
|
||||
is a reference to a module, and each value is a ``_ModMemStats`` object that stores the memory
|
||||
statistics of the module.
|
||||
|
||||
Note:
|
||||
The MemTracker should be used as a context manager. The modules, optimizers, and any other tensors created within
|
||||
the context of MemTracker will be tracked by default. Any tensors or stateful objects such as modules, optimizers etc.
|
||||
that need to be tracked but are created outside the MemTracker should be registered using the `track_external` method.
|
||||
The `track_external` method should be called before the MemTracker is used. Any tensors created outside the ``MemTracker``
|
||||
and not supplied to the `track_external` method will not be tracked by the ``MemTracker``.
|
||||
|
||||
Example usage:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
module = ...
|
||||
optimizer = ...
|
||||
inp = ...
|
||||
mem_tracker = MemTracker()
|
||||
mem_tracker.track_external(module, optimizer, inp)
|
||||
with mem_tracker as mt:
|
||||
loss = module(inp)
|
||||
print("After Forward:")
|
||||
mt.display_snapshot("current")
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
mt.display_snapshot("peak")
|
||||
mt.display_modulewise_snapshots(depth=3, units="MiB")
|
||||
|
||||
Known Limitations:
|
||||
- The ``MemTracker`` does not track memory for tensors that bypass the ``TorchDispatchMode``, e.g. under ``no_dispatch``.
|
||||
- Resizing tensor storages directly by using non-Tensor methods other than ``torch.UntypedStorage.resize_``
|
||||
is not tracked. File a Github issue if you have use-cases for this.
|
||||
- If the tensors are not traceable or wrappable subclasses of ``torch.Tensor``, then the tracker does not know how to
|
||||
track their storages. File a Github issue if you have use-cases for this.
|
||||
- During activation checkpointing (AC) in the backward pass there might be misattribution between activation and temp memory, but the peak memory
|
||||
will be tracked accurately. This will be fixed in the next update by hooking more closely into ``torch.utils.checkpoint``.
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.memory_tracking = WeakIdKeyDictionary()
|
||||
self._curr_mem_snap: dict[torch.device, dict[str, int]] = {}
|
||||
self._peak_mem: dict[torch.device, int] = {}
|
||||
self._peak_mem_snap: dict[torch.device, dict[str, int]] = {}
|
||||
self._param_to_grad_hook_handles = WeakIdKeyDictionary()
|
||||
self._optimizer_hook_handles: Optional[
|
||||
tuple[RemovableHandle, RemovableHandle]
|
||||
] = None
|
||||
# Dictionary to store the ``_WeakRefInfo`` instances corresponding to each tensor's storage.
|
||||
self._WINFO = WeakIdKeyDictionary()
|
||||
self._mod_tracker = ModTracker()
|
||||
# This is a general memory tracker which can be used with any ``_RefType`` subclass
|
||||
self._ref_class: type[_RefType] = _MemRefType
|
||||
# Flags to track if we are in the AC region or optimizer step region
|
||||
self._in_opt: bool = False
|
||||
self._in_ac: bool = False
|
||||
# Weak references to the topmost AC module currently active
|
||||
self._ac_mod: Optional[weakref.ref] = None
|
||||
self._orig_resize = torch.UntypedStorage.resize_
|
||||
self._orig_dtensor_dispatch = DTensor._op_dispatcher.dispatch
|
||||
self._depth = 0
|
||||
|
||||
def _update_snap(
|
||||
self,
|
||||
u_type: _UpdateType,
|
||||
winfo: _WeakRefInfo,
|
||||
old_mem_consumed: Optional[int] = None,
|
||||
old_reftype: Optional[_RefType] = None,
|
||||
) -> None:
|
||||
# Initialize a flag to track if the total memory might drop to zero after updates.
|
||||
maybe_zero = False
|
||||
# Ensure the device entry exists in the current memory snapshot, initializing if necessary.
|
||||
dev_snap = self._curr_mem_snap.setdefault(
|
||||
winfo.device, dict.fromkeys(self._ref_class, 0)
|
||||
)
|
||||
dev_snap.setdefault(_TOTAL_KEY, 0)
|
||||
# Handle different types of updates based on the update type (`u_type`).
|
||||
if u_type == _UpdateType.ADD:
|
||||
# Increase the memory consumed for the specific reference type and update the total.
|
||||
dev_snap[winfo.reftype] += winfo.mem_consumed
|
||||
dev_snap[_TOTAL_KEY] += winfo.mem_consumed
|
||||
elif u_type == _UpdateType.DEL:
|
||||
# Decrease the memory consumed for the specific reference type and reduce the total.
|
||||
dev_snap[winfo.reftype] -= winfo.mem_consumed
|
||||
dev_snap[_TOTAL_KEY] -= winfo.mem_consumed
|
||||
maybe_zero = True
|
||||
elif u_type == _UpdateType.REF:
|
||||
assert old_reftype is not None
|
||||
# Adjust memory consumption between two reference types within the same device.
|
||||
dev_snap[old_reftype] -= winfo.mem_consumed
|
||||
dev_snap[winfo.reftype] += winfo.mem_consumed
|
||||
elif u_type == _UpdateType.SIZE:
|
||||
assert old_mem_consumed is not None
|
||||
# Adjust the memory consumed for a reference type due to a change in size.
|
||||
change = winfo.mem_consumed - old_mem_consumed
|
||||
dev_snap[winfo.reftype] += change
|
||||
dev_snap[_TOTAL_KEY] += change
|
||||
maybe_zero = True
|
||||
else:
|
||||
raise ValueError(f"Invalid update type: {u_type}")
|
||||
# Check if the total memory for the device has dropped to zero.
|
||||
if maybe_zero:
|
||||
if self._curr_mem_snap[winfo.device][_TOTAL_KEY] == 0:
|
||||
# Remove the device entry from the memory snapshot if the total memory is zero.
|
||||
del self._curr_mem_snap[winfo.device]
|
||||
|
||||
def _update_and_maybe_create_winfos(
|
||||
self,
|
||||
t: torch.Tensor,
|
||||
reftype: _RefType,
|
||||
update_existing: bool = False,
|
||||
) -> set[_WeakRefInfo]:
|
||||
sts = get_untyped_storages(t)
|
||||
winfos = set()
|
||||
for st in sts:
|
||||
# Attempt to retrieve existing ``_WeakRefInfo`` and its weak reference from the tracking dictionary.
|
||||
winfo, _ = self._WINFO.get(st, (None, None))
|
||||
if winfo is not None:
|
||||
# If ``_WeakRefInfo`` exists, check if the reference type needs to be updated.
|
||||
old_reftype = winfo.reftype
|
||||
if old_reftype != reftype:
|
||||
# Update the reference type and apply changes via ``_update_snap``.
|
||||
winfo.reftype = reftype
|
||||
self._update_snap(_UpdateType.REF, winfo, old_reftype=old_reftype)
|
||||
winfos.add(winfo)
|
||||
elif update_existing:
|
||||
# If no existing ``_WeakRefInfo`` is found and update_existing is True, raise an error.
|
||||
raise KeyError("No existing winfo found")
|
||||
else:
|
||||
# If no existing _WeakRefInfo is found and update_existing is False, create a new ``_WeakRefInfo``.
|
||||
winfo, w_st = _WeakRefInfo.create_winfo(
|
||||
st, t.device, reftype, self._delete_callback
|
||||
)
|
||||
# Store the new ``_WeakRefInfo`` and its weak reference in the tracking dictionary.
|
||||
self._WINFO[st] = (winfo, w_st)
|
||||
# Update the snapshot for the newly added ``_WeakRefInfo``.
|
||||
if winfo.mem_consumed > 0:
|
||||
self._update_snap(_UpdateType.ADD, winfo)
|
||||
winfos.add(winfo)
|
||||
return winfos
|
||||
|
||||
def _delete_callback(self, winfo: _WeakRefInfo, w_st: weakref.ref) -> None:
|
||||
# Callback to be called when the storage object corresponding to the ``_WeakRefInfo``
|
||||
# instance is about to be finalized.
|
||||
if winfo.mem_consumed > 0:
|
||||
self._update_snap(_UpdateType.DEL, winfo)
|
||||
|
||||
def _track_resize(self) -> None:
|
||||
# Need to monkey-patch this because ``torch.UntypedStorage.resize_`` is not captured
|
||||
# by ``TorchDispatchMode``.
|
||||
@wraps(self._orig_resize)
|
||||
def resize_(st: torch.UntypedStorage, size: int) -> None:
|
||||
self._orig_resize(st, size)
|
||||
winfo, _ = self._WINFO.get(st, (None, None))
|
||||
if winfo is not None and winfo.size != st.size():
|
||||
old_mem_consumed = winfo.mem_consumed
|
||||
winfo.update_mem_consumed(st)
|
||||
self._update_snap(
|
||||
_UpdateType.SIZE, winfo, old_mem_consumed=old_mem_consumed
|
||||
)
|
||||
|
||||
torch.UntypedStorage.resize_ = resize_ # type: ignore[method-assign, assignment]
|
||||
|
||||
def _restore_resize(self) -> None:
|
||||
torch.UntypedStorage.resize_ = self._orig_resize # type: ignore[method-assign]
|
||||
|
||||
def _update_peak_stats(self, peak_state: _State) -> None:
|
||||
# We first capture the current memory snapshot of the current tracker state then,
|
||||
# We step through each of the modules we have tracked so far in ``memory_tracking``
|
||||
# and check if it is currently active by querying ``_mod_tracker.parents``
|
||||
# If it is active, we update the per device peak memory usage for the module
|
||||
# corresponding to the ``_State`` which can be ``PEAK_FW`` or ``PEAK_BW``.
|
||||
curr_snap = self._curr_mem_snap
|
||||
|
||||
for mod_stats in self.memory_tracking.values():
|
||||
if mod_stats.mod_fqn in self._mod_tracker.parents:
|
||||
if peak_state in mod_stats.snapshots:
|
||||
for dev, dev_snap in curr_snap.items():
|
||||
if mod_stats.local_peak.get(dev, 0) < dev_snap[_TOTAL_KEY]:
|
||||
mod_stats.local_peak[dev] = dev_snap[_TOTAL_KEY]
|
||||
mod_stats.snapshots[peak_state][-1][dev] = deepcopy(
|
||||
dev_snap
|
||||
)
|
||||
|
||||
for dev, dev_snap in curr_snap.items():
|
||||
if self._peak_mem.get(dev, 0) < dev_snap[_TOTAL_KEY]:
|
||||
self._peak_mem[dev] = dev_snap[_TOTAL_KEY]
|
||||
self._peak_mem_snap[dev] = deepcopy(dev_snap)
|
||||
|
||||
def _track(self, reftype: _RefType, t: torch.Tensor) -> None:
|
||||
# Get the storages of the tensor and check if we have already tracked them.
|
||||
# If yes, then check if the storage size has changed and update the current snapshot.
|
||||
# Else create a new ``_WeakRefInfo`` instance and add it to the dictionary.
|
||||
sts = get_untyped_storages(t)
|
||||
for st in sts:
|
||||
winfo, _ = self._WINFO.get(st, (None, None))
|
||||
if winfo is not None:
|
||||
if winfo.size != st.size():
|
||||
old_mem_consumed = winfo.mem_consumed
|
||||
winfo.update_mem_consumed(st)
|
||||
self._update_snap(
|
||||
_UpdateType.SIZE, winfo, old_mem_consumed=old_mem_consumed
|
||||
)
|
||||
return
|
||||
else:
|
||||
winfo, w_st = _WeakRefInfo.create_winfo(
|
||||
st, t.device, reftype, self._delete_callback
|
||||
)
|
||||
self._WINFO[st] = (winfo, w_st)
|
||||
# Update the current snapshot for the newly added ``_WeakRefInfo``.
|
||||
if winfo.mem_consumed > 0:
|
||||
self._update_snap(_UpdateType.ADD, winfo)
|
||||
|
||||
def get_tracker_snapshot(
|
||||
self, type: str = "current"
|
||||
) -> dict[torch.device, dict[str, int]]:
|
||||
"""
|
||||
Capture a snapshot of the memory usage breakdown per device, based on the specified type.
|
||||
|
||||
Args:
|
||||
type (str): The type of snapshot to capture. Can be "current" for the current memory usage or "peak" for the
|
||||
peak memory usage. Defaults to "current".
|
||||
Returns:
|
||||
Dict[torch.device, Dict[str, int]]: A dictionary where each key is a torch.device, and each value is another
|
||||
dictionary. This inner dictionary has keys representing memory reference
|
||||
types as defined in ``_MemRefType`` and values representing the amount of
|
||||
memory consumed in bytes.
|
||||
Raises:
|
||||
ValueError: If an invalid type is specified.
|
||||
"""
|
||||
if type == "current":
|
||||
return deepcopy(self._curr_mem_snap)
|
||||
elif type == "peak":
|
||||
return deepcopy(self._peak_mem_snap)
|
||||
else:
|
||||
raise ValueError(f"Invalid type {type}")
|
||||
|
||||
def _track_module_params_and_buffers(
|
||||
self, module: nn.Module, install_grad_hooks: bool = True
|
||||
) -> tuple[int, int]:
|
||||
# Track the parameters and buffers of the module if not already tracked.
|
||||
# If the parameters have gradients, track the gradients as well.
|
||||
# If install_grad_hooks is True, install a gradient hook on the parameters
|
||||
# to track the gradients, if it has not already been installed.
|
||||
# Return the total memory consumed by the parameters and buffers.
|
||||
def _grad_hook(grad: torch.Tensor) -> None:
|
||||
self._update_and_maybe_create_winfos(
|
||||
grad,
|
||||
_MemRefType.GRAD,
|
||||
)
|
||||
|
||||
param_memory = 0
|
||||
for param in module.parameters():
|
||||
winfos = self._update_and_maybe_create_winfos(
|
||||
param,
|
||||
_MemRefType.PARAM,
|
||||
)
|
||||
param_memory += sum(winfo.mem_consumed for winfo in winfos)
|
||||
if param.grad is not None:
|
||||
self._update_and_maybe_create_winfos(
|
||||
param.grad,
|
||||
_MemRefType.GRAD,
|
||||
)
|
||||
if (
|
||||
self._param_to_grad_hook_handles.get(param, None) is None
|
||||
and install_grad_hooks
|
||||
):
|
||||
grad_hook_handle = param.register_hook(_grad_hook)
|
||||
post_acc_grad_hook_handle = param.register_post_accumulate_grad_hook(
|
||||
lambda p: (_grad_hook(p.grad))
|
||||
)
|
||||
self._param_to_grad_hook_handles[param] = (
|
||||
grad_hook_handle,
|
||||
post_acc_grad_hook_handle,
|
||||
)
|
||||
buffer_memory = 0
|
||||
for buffer in module.buffers():
|
||||
winfos = self._update_and_maybe_create_winfos(
|
||||
buffer,
|
||||
_MemRefType.BUFFER,
|
||||
)
|
||||
buffer_memory += sum(winfo.mem_consumed for winfo in winfos)
|
||||
return (param_memory, buffer_memory)
|
||||
|
||||
def _track_inputs_or_outputs(self, args: Any) -> int:
|
||||
# Calculate the memory consumed by the inputs or outputs of the module.
|
||||
input_or_output_memory = 0
|
||||
|
||||
def add_inps_or_outs(t: torch.Tensor) -> None:
|
||||
nonlocal input_or_output_memory
|
||||
sts = get_untyped_storages(t)
|
||||
for st in sts:
|
||||
winfo, _ = self._WINFO.get(st, (None, None))
|
||||
if winfo is not None:
|
||||
input_or_output_memory += winfo.mem_consumed
|
||||
|
||||
tree_map_only(torch.Tensor, add_inps_or_outs, args)
|
||||
return input_or_output_memory
|
||||
|
||||
def _pre_fw_hook(self, module: nn.Module, inputs: Any) -> None:
|
||||
# This is installed as a pre-fwd user hook with ``ModTracker``. Based on the following cases we
|
||||
# set the state and capture the memory snapshot for the module.
|
||||
# Case 1: If the module is not in the ``memory_tracking`` dictionary, we track the parameters, buffers,
|
||||
# input and output memory of the module. Create a new ``_ModMemStats`` instance for the module
|
||||
# and add it to the ``memory_tracking`` dictionary.
|
||||
# Case 2: If the module is already in the ``memory_tracking`` dictionary and we are in backward, this means
|
||||
# we are in the AC region. We check if this is the top most module in the AC region. If it is,
|
||||
# we store a weak reference and set the flag ``_in_ac`` to True.
|
||||
# Case 3: If the module is already in the ``memory_tracking`` dictionary and we are in forward, this means
|
||||
# this module is called for the second time. If it is a root module, that means we are in the next
|
||||
# iteration and we error out. If it is not a root module, that means it's a submodule that is being
|
||||
# used multiple times in the same iteration, which we allow and track.
|
||||
# For Cases 1 and 3, we also initialize the ``local_peak`` and ``PEAK_FW`` snapshot for the module.
|
||||
mod_name = self._mod_tracker.get_known_fqn(module)
|
||||
assert mod_name is not None
|
||||
if module not in self.memory_tracking:
|
||||
mod_stats = _ModMemStats(mod_name)
|
||||
param_mem, buffer_mem = self._track_module_params_and_buffers(
|
||||
module, install_grad_hooks=True
|
||||
)
|
||||
input_mem = self._track_inputs_or_outputs(inputs)
|
||||
mod_stats.parameter_mem = param_mem
|
||||
mod_stats.buffer_mem = buffer_mem
|
||||
mod_stats.input_mem = input_mem
|
||||
self.memory_tracking[module] = mod_stats
|
||||
state = _ModState.PRE_FW
|
||||
|
||||
elif self._mod_tracker.is_bw:
|
||||
mod_stats = self.memory_tracking[module]
|
||||
state = _ModState.PRE_FW_AC
|
||||
if self._ac_mod is None:
|
||||
self._ac_mod = weakref.ref(module)
|
||||
self._in_ac = True
|
||||
else:
|
||||
parents = set(self._mod_tracker.parents) - {mod_name}
|
||||
if len(parents) == 1 and "Global" in parents:
|
||||
raise NotImplementedError(
|
||||
"MemTracker does not support memory tracking for multiple iterative calls."
|
||||
" Either use ``reset_mod_stats`` to clear module memory stats for the previous iteration"
|
||||
" or file a github issue if you need this feature."
|
||||
)
|
||||
mod_stats = self.memory_tracking[module]
|
||||
state = _ModState.PRE_FW
|
||||
input_mem = self._track_inputs_or_outputs(inputs)
|
||||
mod_stats.mod_fqn = mod_name
|
||||
mod_stats.input_mem = input_mem
|
||||
|
||||
mem_snapshot = self.get_tracker_snapshot()
|
||||
if state == _ModState.PRE_FW:
|
||||
mod_stats.local_peak = {
|
||||
dev: dev_snap[_TOTAL_KEY] for dev, dev_snap in mem_snapshot.items()
|
||||
}
|
||||
mod_stats.snapshots.setdefault(_ModState.PEAK_FW, []).append(mem_snapshot)
|
||||
mod_stats.snapshots.setdefault(state, []).append(deepcopy(mem_snapshot))
|
||||
|
||||
def _post_fw_hook(self, module: nn.Module, inputs: Any, outputs: Any) -> None:
|
||||
# This is installed as a post-fwd user hook with ``ModTracker``. Based on the following cases we
|
||||
# set the state and capture the memory snapshot for the module.
|
||||
# Case 1: This is called in backward, which means we are in the AC region. If this is the top most module
|
||||
# in the AC region, we set the flag ``_in_ac`` to False.
|
||||
# Case 2: This is called in forward so we calculate the output memory
|
||||
# of the module and update its mod_stats.
|
||||
mod_stats = self.memory_tracking[module]
|
||||
if self._mod_tracker.is_bw:
|
||||
state = _ModState.POST_FW_AC
|
||||
if self._ac_mod is not None and self._ac_mod() is module:
|
||||
self._ac_mod = None
|
||||
self._in_ac = False
|
||||
else:
|
||||
state = _ModState.POST_FW
|
||||
output_mem = self._track_inputs_or_outputs(outputs)
|
||||
mod_stats.output_mem = output_mem
|
||||
mod_stats.snapshots.setdefault(state, []).append(self.get_tracker_snapshot())
|
||||
|
||||
def _pre_bw_hook(self, module: nn.Module, args: Any) -> None:
|
||||
# This is installed as a pre-bwd user hook with ``ModTracker``. We set the state and capture the
|
||||
# snapshot for the module. We also initialize the ``local_peak`` and ``PEAK_BW`` snapshot for it.
|
||||
# If the module is None, we skip the hook.
|
||||
# This can happen since this is installed inside a multi-grad hook on the module's output tensors
|
||||
# and the module itself may not be alive during backward.
|
||||
if module is None:
|
||||
warnings.warn("Module is None. Skipping PRE_BW hook.", stacklevel=2)
|
||||
return
|
||||
mod_stats = self.memory_tracking[module]
|
||||
mem_snapshot = self.get_tracker_snapshot()
|
||||
mod_stats.local_peak = {
|
||||
dev: dev_snap[_TOTAL_KEY] for dev, dev_snap in mem_snapshot.items()
|
||||
}
|
||||
mod_stats.snapshots.setdefault(_ModState.PEAK_BW, []).append(mem_snapshot)
|
||||
mod_stats.snapshots.setdefault(_ModState.PRE_BW, []).append(
|
||||
deepcopy(mem_snapshot)
|
||||
)
|
||||
|
||||
def _post_bw_hook(self, module: nn.Module, args: Any) -> None:
|
||||
# This is installed as a post-bwd user hook with ``ModTracker``. We set the state and capture the
|
||||
# snapshot for the module if it is not None.
|
||||
# This can happen since this is installed inside a multi-grad hook on the module's input tensors
|
||||
# and the module itself may not be alive during backward.
|
||||
if module is None:
|
||||
warnings.warn("Module is None. Skipping POST_BW hook.", stacklevel=2)
|
||||
return
|
||||
mod_stats = self.memory_tracking[module]
|
||||
mod_stats.snapshots.setdefault(_ModState.POST_BW, []).append(
|
||||
self.get_tracker_snapshot()
|
||||
)
|
||||
|
||||
def _track_optimizer_states(
|
||||
self, reftype: _RefType, optimizer: optim.Optimizer
|
||||
) -> None:
|
||||
for states in optimizer.state.values():
|
||||
for val in states.values():
|
||||
if isinstance(val, torch.Tensor):
|
||||
self._update_and_maybe_create_winfos(
|
||||
val,
|
||||
reftype,
|
||||
)
|
||||
|
||||
def _register_global_optimizer_hook(self) -> None:
|
||||
# Register a hook on the optimizer step to track the optimizer states.
|
||||
# The pre-hook is to set the flag ``_in_opt`` to True. The post-hook unsets the flag,
|
||||
# and also tracks any optimizer states that are created during the optimizer step.
|
||||
def _opt_step_pre_hook(
|
||||
optimizer: optim.Optimizer, args: Any, kwargs: Any
|
||||
) -> None:
|
||||
self._in_opt = True
|
||||
|
||||
def _opt_step_post_hook(
|
||||
optimizer: optim.Optimizer, args: Any, kwargs: Any
|
||||
) -> None:
|
||||
self._track_optimizer_states(_MemRefType.OPT, optimizer)
|
||||
self._in_opt = False
|
||||
|
||||
self._optimizer_hook_handles = (
|
||||
register_optimizer_step_pre_hook(_opt_step_pre_hook),
|
||||
register_optimizer_step_post_hook(_opt_step_post_hook),
|
||||
)
|
||||
|
||||
def _deregister_param_and_optimizer_hooks(self) -> None:
|
||||
for (
|
||||
grad_hook_handle,
|
||||
post_acc_grad_hook_handle,
|
||||
) in self._param_to_grad_hook_handles.values():
|
||||
grad_hook_handle.remove()
|
||||
post_acc_grad_hook_handle.remove()
|
||||
self._param_to_grad_hook_handles.clear()
|
||||
|
||||
if self._optimizer_hook_handles is not None:
|
||||
for handle in self._optimizer_hook_handles:
|
||||
handle.remove()
|
||||
self._optimizer_hook_handles = None
|
||||
|
||||
def track_external(
|
||||
self, *external: Union[nn.Module, optim.Optimizer, torch.Tensor]
|
||||
) -> None:
|
||||
"""
|
||||
Track tensors and stateful objects like modules, optimizers etc. that are created outside the MemTracker.
|
||||
|
||||
This method should be called before the ``MemTracker`` is used. Any tensors that are not module parameters, buffers,
|
||||
gradients, activations, or optimizer states will be categorized as ``Other``. If you want them categorized with a
|
||||
custom name, please file a GitHub issue. Any tensors created outside the MemTracker and not supplied to this
|
||||
method will not be tracked by ``MemTracker``.
|
||||
|
||||
Args:
|
||||
*external (Union[nn.Module, optim.Optimizer, torch.Tensor]): The external modules, optimizers, and
|
||||
tensors to be tracked.
|
||||
"""
|
||||
flat_external, _ = tree_flatten(external)
|
||||
for obj in flat_external:
|
||||
if isinstance(obj, torch.Tensor):
|
||||
self._update_and_maybe_create_winfos(
|
||||
obj,
|
||||
_MemRefType.OTH,
|
||||
)
|
||||
elif isinstance(obj, torch.nn.Module):
|
||||
self._track_module_params_and_buffers(obj, install_grad_hooks=False)
|
||||
elif isinstance(obj, optim.Optimizer):
|
||||
self._track_optimizer_states(_MemRefType.OPT, obj)
|
||||
elif obj is None:
|
||||
continue
|
||||
else:
|
||||
raise TypeError(
|
||||
f"Object of type {type(obj)} is not supported for tracking. "
|
||||
f"Only stateful objects like modules, optimizers, and tensors are supported."
|
||||
)
|
||||
|
||||
def display_snapshot(
|
||||
self, type: str = "current", units: str = "B", tabulate: bool = False
|
||||
) -> None:
|
||||
"""
|
||||
Display the memory usage breakdown snapshot of the tracker based on the specified type and units.
|
||||
|
||||
Keyword args:
|
||||
type (str): The type of snapshot to display. Can be "current" for the current memory usage or "peak" for the
|
||||
peak memory usage. Defaults to "current".
|
||||
units (str): The units to use for displaying memory usage. Defaults to "B". Supports ["B", "KiB", "MiB", "GiB"].
|
||||
tabulate (bool): Whether to display the snapshot in a tabular format. Defaults to False.
|
||||
"""
|
||||
snapshot = self.get_tracker_snapshot(type)
|
||||
if tabulate:
|
||||
_print_snapshot_tabular(snapshot, units)
|
||||
else:
|
||||
_print_snapshot(snapshot, units)
|
||||
|
||||
def display_modulewise_snapshots(
|
||||
self, depth: int = 2, units: str = "B", tabulate: bool = False
|
||||
) -> None:
|
||||
"""
|
||||
Print per device memory breakdown snapshot for each module called within MemTracker.
|
||||
|
||||
Snapshots are displayed for the states defined by ``_ModState``.
|
||||
The module hierarchy is displayed up to the specified depth.
|
||||
|
||||
Keyword Args:
|
||||
depth (int, optional): The depth of the module hierarchy to display. Defaults to 2.
|
||||
units (str, optional): The units to use for memory tracking. Defaults to "B". Supports ["B", "KiB", "MiB", "GiB"].
|
||||
tabulate (bool, optional): Whether to display the snapshot in a tabular format. Defaults to False.
|
||||
"""
|
||||
|
||||
def natural_sort_key(s: str) -> list[Union[int, str]]:
|
||||
return [
|
||||
int(text) if text.isdigit() else text.lower()
|
||||
for text in re.split("([0-9]+)", s)
|
||||
]
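        # Natural sort keeps numeric parts of FQNs in numeric order, e.g. "layers.10"
        # sorts after "layers.2" rather than lexicographically before it.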
|
||||
|
||||
for mod_stats in sorted(
|
||||
self.memory_tracking.values(),
|
||||
key=lambda m_stats: natural_sort_key(m_stats.mod_fqn),
|
||||
):
|
||||
mod_fqn = mod_stats.mod_fqn
|
||||
mod_depth = mod_fqn.count(".") + 1
|
||||
if mod_depth > depth:
|
||||
continue
|
||||
print(f"Module: {mod_fqn}")
|
||||
if tabulate:
|
||||
_print_state_snapshots_tabular(mod_stats.snapshots, units)
|
||||
else:
|
||||
_print_state_snapshots(mod_stats.snapshots, units)
|
||||
|
||||
def reset_mod_stats(self) -> None:
|
||||
"""
|
||||
Reset all the module memory stats. Clears ``memory_tracking`` dictionary.
|
||||
"""
|
||||
self.memory_tracking.clear()
|
||||
|
||||
def _track_dtensor_dispatch(self) -> None:
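        # DTensor ops served by custom op handlers may not reach ``__torch_dispatch__``,
        # so the tracker re-enters itself around those calls only; all other ops take the
        # normal dispatch path unchanged.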
|
||||
def track_dtensor_dispatch(
|
||||
op_call: torch._ops.OpOverload,
|
||||
args: tuple[object, ...],
|
||||
kwargs: dict[str, object],
|
||||
) -> object:
|
||||
with (
|
||||
self
|
||||
if op_call in DTensor._op_dispatcher._custom_op_handlers
|
||||
else nullcontext()
|
||||
):
|
||||
return self._orig_dtensor_dispatch(op_call, args, kwargs)
|
||||
|
||||
DTensor._op_dispatcher.dispatch = track_dtensor_dispatch # type: ignore[method-assign, assignment]
|
||||
|
||||
def _restore_dtensor_dispatch(self) -> None:
|
||||
DTensor._op_dispatcher.dispatch = self._orig_dtensor_dispatch # type: ignore[method-assign]
|
||||
|
||||
def __enter__(self) -> "MemTracker":
|
||||
if self._depth == 0:
|
||||
self._register_global_optimizer_hook()
|
||||
self._mod_tracker.register_user_hooks(
|
||||
self._pre_fw_hook,
|
||||
self._post_fw_hook,
|
||||
self._pre_bw_hook,
|
||||
self._post_bw_hook,
|
||||
)
|
||||
self._track_resize()
|
||||
self._track_dtensor_dispatch()
|
||||
self._peak_mem_snap = self.get_tracker_snapshot()
|
||||
self._peak_mem = {
|
||||
dev: dev_snap[_TOTAL_KEY]
|
||||
for dev, dev_snap in self._peak_mem_snap.items()
|
||||
}
|
||||
self._mod_tracker.__enter__()
|
||||
super().__enter__()
|
||||
self._depth += 1
|
||||
return self
|
||||
|
||||
def __exit__(self, *args: Any) -> None:
|
||||
self._depth -= 1
|
||||
if self._depth == 0:
|
||||
self._deregister_param_and_optimizer_hooks()
|
||||
self._mod_tracker.clear_user_hooks()
|
||||
self._restore_resize()
|
||||
self._restore_dtensor_dispatch()
|
||||
self._mod_tracker.__exit__(*args)
|
||||
super().__exit__(*args)
|
||||
|
||||
def __torch_dispatch__(self, func, types, args=(), kwargs=None): # type: ignore[no-untyped-def]
|
||||
if (
|
||||
func == torch.ops._c10d_functional.wait_tensor.default
|
||||
and active_fake_mode()
|
||||
):
|
||||
# N.B: This is a hacky way to override the Meta IMPL of wait_tensor. The original impl returns
|
||||
# a new tensor which does not happen in eager mode, when a wait_tensor is called.
|
||||
res = args[0]
|
||||
else:
|
||||
res = func(*args, **kwargs or {})
|
||||
# If we are tracking an optimizer state, we use the optimizer reference type.
|
||||
# If we are in backward region and not in AC region, we use the backward reference type.
|
||||
# Else we use the forward reference type.
|
||||
if self._in_opt:
|
||||
reftype = _MemRefType.OPT
|
||||
elif self._mod_tracker.is_bw and not self._in_ac:
|
||||
reftype = _MemRefType.TEMP
|
||||
else:
|
||||
reftype = _MemRefType.ACT
|
||||
tree_map_only(torch.Tensor, partial(self._track, reftype), res)
|
||||
peak_state = _ModState.PEAK_BW if self._mod_tracker.is_bw else _ModState.PEAK_FW
|
||||
self._update_peak_stats(peak_state)
|
||||
return res
|
|
@ -0,0 +1,296 @@
|
|||
# mypy: allow-untyped-defs
|
||||
import operator
|
||||
import pickle
|
||||
from collections import defaultdict
|
||||
from collections.abc import Sequence
|
||||
from itertools import chain
|
||||
from typing import Any, Callable, no_type_check, TYPE_CHECKING
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
from torch.utils._python_dispatch import TorchDispatchMode
|
||||
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from torch.utils.hooks import RemovableHandle
|
||||
|
||||
|
||||
BYTES_PER_MB = 1024 * 1024.0
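# Conversion factor used to report all recorded memory stats below in MB.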
|
||||
|
||||
|
||||
class MemoryProfileDispatchMode(TorchDispatchMode):
|
||||
"""Run in ``TorchDispatchMode`` to get memory stats at operator level."""
|
||||
|
||||
def __init__(self, memory_tracker) -> None:
|
||||
self.memory_tracker = memory_tracker
|
||||
|
||||
def __torch_dispatch__(self, func, types, args=..., kwargs=None):
|
||||
rs = func(*args, **kwargs)
|
||||
if func == torch.ops.aten.detach.default:
|
||||
return rs
|
||||
func_name: str = (
|
||||
self.memory_tracker._cur_module_name
|
||||
+ "."
|
||||
+ func.__name__
|
||||
+ "_"
|
||||
+ str(self.memory_tracker._operator_names[func.__name__])
|
||||
)
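        # The recorded key looks roughly like "<module>.forward.<op>_<N>", where N counts
        # prior dispatches of the same op name across the run.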
|
||||
self.memory_tracker._operator_names[func.__name__] = (
|
||||
self.memory_tracker._operator_names[func.__name__] + 1
|
||||
)
|
||||
self.memory_tracker._record_memory_stats(func_name)
|
||||
|
||||
return rs
|
||||
|
||||
|
||||
class MemoryTracker:
|
||||
"""
|
||||
Collect and plot the memory stats at operator level.
|
||||
|
||||
Includes ``memories_allocated``, ``memories_active`` and ``memories_reserved``.
|
||||
It also prints a summary of the top 20 operators that allocate the most memory.
|
||||
|
||||
Example usage:
|
||||
|
||||
>>> # xdoctest: +SKIP(failing)
|
||||
>>> net.cuda()
|
||||
>>> input = input.cuda()
|
||||
|
||||
>>> mem_tracker = MemoryTracker()
|
||||
>>> mem_tracker.start_monitor(net)
|
||||
|
||||
>>> net.zero_grad(True)
|
||||
>>> loss = net(input)
|
||||
>>> if isinstance(loss, dict):
|
||||
>>> loss = loss['out']
|
||||
>>> loss.sum().backward()
|
||||
>>> net.zero_grad(set_to_none=True)
|
||||
|
||||
>>> mem_tracker.stop()
|
||||
>>> mem_tracker.summary()
|
||||
>>> mem_tracker.show_traces()
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
torch._C._log_api_usage_once("torch.distributed.memory_tracker")
|
||||
self._hooks: list[RemovableHandle] = []
|
||||
self._operator_names: dict[str, int] = defaultdict(int)
|
||||
self.memories_allocated: dict[int, dict[str, float]] = defaultdict()
|
||||
self.memories_active: dict[int, dict[str, float]] = defaultdict()
|
||||
self.memories_reserved: dict[int, dict[str, float]] = defaultdict()
|
||||
self._markers: dict[str, int] = defaultdict(int)
|
||||
self._cur_module_name: str = ""
|
||||
self._op_index: int = 0
|
||||
self._num_cuda_retries: int = 0
|
||||
|
||||
@no_type_check
|
||||
def start_monitor(self, root_module: nn.Module) -> None:
|
||||
"""
|
||||
Register module hooks and enter ``MemoryProfileDispatchMode``.
|
||||
|
||||
This enables operator-level memory stats to be tracked during module runtime.
|
||||
"""
|
||||
self._clear_state()
|
||||
root_module.__setattr__("_memory_tracker_is_root", True)
|
||||
for name, m in root_module.named_modules():
|
||||
if m is not root_module:
|
||||
m.__setattr__("_memory_tracker_is_root", False)
|
||||
# fused_proxy_group does not support hooks
|
||||
if ".fused_proxy_grouped_embedding_bag" in name:
|
||||
continue
|
||||
# hook ordering with other hooks added by users is not managed, so
|
||||
# the memory stats tracked here may not be completely accurate.
|
||||
h1 = m.register_forward_pre_hook(self._create_pre_forward_hook(name))
|
||||
h2 = m.register_forward_hook(self._create_post_forward_hook(name))
|
||||
# it does not work well with jagged tensors somehow; the root cause is not
|
||||
# clear, so remove it for now as it does not really capture important info.
|
||||
# h3 = m.register_backward_hook(self._create_backward_hook(name))
|
||||
self._hooks.extend([h1, h2])
|
||||
torch.cuda.empty_cache()
|
||||
assert getattr(self, "profile_mode", None) is None
|
||||
self.profile_mode = MemoryProfileDispatchMode(self)
|
||||
self.profile_mode.__enter__()
|
||||
|
||||
@no_type_check
|
||||
def stop(self) -> None:
|
||||
"""
|
||||
Remove module hooks and exit ``MemoryProfileDispatchMode`` to stop tracking memory stats at operator level.
|
||||
|
||||
Also collect some aggregated stats from when tracking was enabled, like the cuda ``num_alloc_retries``.
|
||||
"""
|
||||
self._num_cuda_retries = torch.cuda.memory_stats().get("num_alloc_retries", 0)
|
||||
|
||||
for h in self._hooks:
|
||||
h.remove()
|
||||
self._hooks.clear()
|
||||
assert getattr(self, "profile_mode", None) is not None
|
||||
self.profile_mode.__exit__(None, None, None)
|
||||
self.profile_mode = None
|
||||
|
||||
@no_type_check
|
||||
def summary(self, top: int = 20) -> None:
|
||||
"""
|
||||
Print out the top operators that allocate the most memory.
|
||||
|
||||
The number of the top operators can be configured.
|
||||
"""
|
||||
op_diff: dict[str, float] = defaultdict(float)
|
||||
op_name, previous_allocated_memory = self.memories_allocated[0]
|
||||
for i in range(1, self._op_index):
|
||||
op_name, current_allocated_memory = self.memories_allocated[i]
|
||||
op_diff[op_name] = current_allocated_memory - previous_allocated_memory
|
||||
previous_allocated_memory = current_allocated_memory
|
||||
|
||||
print("------------------------------------------------")
|
||||
print(f"The number of cuda retries are: {self._num_cuda_retries}")
|
||||
print(f"Top {top} ops that generates memory are:")
|
||||
for k, v in sorted(op_diff.items(), key=operator.itemgetter(1), reverse=True)[
|
||||
:top
|
||||
]:
|
||||
print(f"{k}: {v}MB")
|
||||
print("------------------------------------------------")
|
||||
|
||||
@no_type_check
|
||||
def show_traces(self, path: str = "") -> None:
|
||||
import matplotlib.pyplot as plt
|
||||
|
||||
def _plot_figure(x, y_values, labels):
|
||||
min_val = min(chain.from_iterable(y_values)) * 0.999
|
||||
max_val = max(chain.from_iterable(y_values)) * 1.001
|
||||
plt.figure()
|
||||
for y, label in zip(y_values, labels):
|
||||
plt.plot(x, y, label=label)
|
||||
plt.xlabel("# Operator Calls")
|
||||
plt.ylabel("Memory (MB)")
|
||||
plt.legend()
|
||||
for marker_name, marker in self._markers.items():
|
||||
if marker_name == "fw_bw_boundary":
|
||||
plt.plot(
|
||||
[marker, marker],
|
||||
[min_val, max_val],
|
||||
"r",
|
||||
lw=2,
|
||||
label=marker_name,
|
||||
)
|
||||
else:
|
||||
plt.plot(
|
||||
[marker, marker],
|
||||
[min_val, max_val],
|
||||
"k-",
|
||||
lw=2,
|
||||
label=marker_name,
|
||||
)
|
||||
|
||||
if path != "":
|
||||
self.load(path)
|
||||
|
||||
y_1 = [gb for (name, gb) in self.memories_allocated.values()]
|
||||
y_2 = [gb for (name, gb) in self.memories_active.values()]
|
||||
y_3 = [gb for (name, gb) in self.memories_reserved.values()]
|
||||
x = list(range(len(y_1)))
|
||||
# Split figures when there is big difference between
|
||||
# "reserved_memory" and "allocated_memory" or "active_memory".
|
||||
_plot_figure(
|
||||
x,
|
||||
[list(y_1), list(y_2), list(y_3)],
|
||||
["allocated_memory", "active_memory", "reserved_memory"],
|
||||
)
|
||||
_plot_figure(x, [list(y_1)], ["allocated_memory"])
|
||||
_plot_figure(x, [list(y_2)], ["active_memory"])
|
||||
_plot_figure(x, [list(y_3)], ["reserved_memory"])
|
||||
|
||||
def save_stats(self, path: str) -> None:
|
||||
"""Save the stats using pickle during runtime if users want to plot the traces in other places like notebook."""
|
||||
stats = {
|
||||
"memories_allocated": self.memories_allocated,
|
||||
"memories_active": self.memories_active,
|
||||
"memories_reserved": self.memories_reserved,
|
||||
"markers": self._markers,
|
||||
"num_alloc_retries": self._num_cuda_retries,
|
||||
}
|
||||
|
||||
with open(path, "wb") as f:
|
||||
pickle.dump(stats, f, pickle.HIGHEST_PROTOCOL)
|
||||
|
||||
def load(self, path: str) -> None:
|
||||
"""Load the pickled memory stats to plot the traces or print the summary."""
|
||||
with open(path, "rb") as f:
|
||||
stats = pickle.load(f)
|
||||
|
||||
self.memories_allocated = stats["memories_allocated"]
|
||||
self.memories_active = stats["memories_active"]
|
||||
self.memories_reserved = stats["memories_reserved"]
|
||||
self._markers = stats["markers"]
|
||||
self._num_cuda_retries = stats["num_alloc_retries"]
|
||||
|
||||
def _create_pre_forward_hook(self, name: str) -> Callable:
|
||||
"""Prefix operator name with current module and 'forward', and insert 'fw_start' marker at forward pass start."""
|
||||
|
||||
def _pre_forward_hook(module: nn.Module, inputs: Any) -> None:
|
||||
self._cur_module_name = f"{name}.forward"
|
||||
if (
|
||||
hasattr(module, "_memory_tracker_is_root")
|
||||
and module._memory_tracker_is_root
|
||||
):
|
||||
self._add_marker("fw_start")
|
||||
|
||||
return _pre_forward_hook
|
||||
|
||||
def _create_post_forward_hook(self, name: str) -> Callable:
|
||||
"""Insert the marker 'fw_bw_boundary' at the boundary of forward and backward pass."""
|
||||
|
||||
def _post_forward_hook(
|
||||
module: nn.Module,
|
||||
inputs: Sequence[torch.Tensor],
|
||||
outputs: Sequence[torch.Tensor],
|
||||
) -> None:
|
||||
if (
|
||||
hasattr(module, "_memory_tracker_is_root")
|
||||
and module._memory_tracker_is_root
|
||||
):
|
||||
self._add_marker("fw_bw_boundary")
|
||||
|
||||
return _post_forward_hook
|
||||
|
||||
def _create_backward_hook(self, name: str) -> Callable:
|
||||
"""Insert the current module name with backward prefix for the operator name."""
|
||||
|
||||
def _backward_hook(
|
||||
module: nn.Module, grad_input: torch.Tensor, grad_output: torch.Tensor
|
||||
) -> None:
|
||||
self._cur_module_name = f"{name}.backward"
|
||||
|
||||
return _backward_hook
|
||||
|
||||
@no_type_check
|
||||
def _record_memory_stats(self, fn_name: str) -> None:
|
||||
"""
|
||||
Record current memory allocated, current memory active and current memory reserved.
|
||||
|
||||
The memory stats dict is indexed with ``self._op_index``.
|
||||
"""
|
||||
memory_allocated: float = torch.cuda.memory_allocated() / BYTES_PER_MB
|
||||
memory_reserved: float = torch.cuda.memory_reserved() / BYTES_PER_MB
|
||||
memory_active: float = (
|
||||
torch.cuda.memory_stats().get("active_bytes.all.current", 0) / BYTES_PER_MB
|
||||
)
|
||||
self.memories_allocated[self._op_index] = (fn_name, memory_allocated)
|
||||
self.memories_reserved[self._op_index] = (fn_name, memory_reserved)
|
||||
self.memories_active[self._op_index] = (fn_name, memory_active)
|
||||
self._op_index += 1
|
||||
|
||||
def _add_marker(self, marker_name: str) -> None:
|
||||
"""Set the marker's x-axis value."""
|
||||
marker_val = len(self.memories_allocated.values())
|
||||
self._markers[marker_name] = marker_val
|
||||
|
||||
def _clear_state(self) -> None:
|
||||
"""Clear states when start_monitor() is called."""
|
||||
self._operator_names.clear()
|
||||
self.memories_allocated.clear()
|
||||
self.memories_active.clear()
|
||||
self.memories_reserved.clear()
|
||||
self._markers.clear()
|
||||
self._cur_module_name = ""
|
||||
self._op_index = 0
|
||||
self._num_cuda_retries = 0
|
251
venv/Lib/site-packages/torch/distributed/_tools/mod_tracker.py
Normal file
251
venv/Lib/site-packages/torch/distributed/_tools/mod_tracker.py
Normal file
|
@ -0,0 +1,251 @@
|
|||
# mypy: allow-untyped-defs
|
||||
import warnings
|
||||
import weakref
|
||||
from typing import Callable, Optional
|
||||
|
||||
import torch
|
||||
from torch.autograd.graph import register_multi_grad_hook
|
||||
from torch.nn.modules.module import (
|
||||
register_module_forward_hook,
|
||||
register_module_forward_pre_hook,
|
||||
)
|
||||
from torch.utils._pytree import tree_flatten
|
||||
|
||||
|
||||
__all__ = ["ModTracker"]
|
||||
|
||||
|
||||
class ModTracker:
|
||||
"""
|
||||
``ModTracker`` is a context manager that tracks the nn.Module hierarchy during execution
|
||||
so that other systems can query which Module is currently being executed (or its backward is being
|
||||
executed).
|
||||
|
||||
You can access the ``parents`` attribute on this context manager to get the set of all the
|
||||
Modules currently being executed via their fqn (fully qualified name, also used as the key within
|
||||
the state_dict).
|
||||
You can access the ``is_bw`` attribute to know if you are currently running in backward or not.
|
||||
|
||||
Note that ``parents`` is never empty and always contains the "Global" key. The ``is_bw`` flag
|
||||
will remain ``True`` after the forward until another Module is executed. If you need it to be
|
||||
more accurate, please submit an issue requesting this. Adding a map from fqn to the module instance
|
||||
is possible but not done yet; please submit an issue requesting this if you need it.
|
||||
|
||||
Example usage
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
mod = torch.nn.Linear(2, 2)
|
||||
|
||||
with ModTracker() as tracker:
|
||||
# Access anything during the forward pass
|
||||
def my_linear(m1, m2, bias):
|
||||
print(f"Current modules: {tracker.parents}")
|
||||
return torch.mm(m1, m2.t()) + bias
|
||||
|
||||
torch.nn.functional.linear = my_linear
|
||||
|
||||
mod(torch.rand(2, 2))
|
||||
|
||||
"""
|
||||
|
||||
parents: set[str]
|
||||
"""
|
||||
A Set containing the fqn for each module currently running their forward
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
self.parents = {"Global"}
|
||||
self._active_module_cnt = {}
|
||||
self._known_modules: weakref.WeakKeyDictionary = weakref.WeakKeyDictionary()
|
||||
self._seen_modules: weakref.WeakSet = weakref.WeakSet()
|
||||
self._has_callback = False
|
||||
self._post_bw_callbacks_to_enqueue: list[Callable] = []
|
||||
self._user_pre_fw_hook = None
|
||||
self._user_post_fw_hook = None
|
||||
self._user_pre_bw_hook = None
|
||||
self._user_post_bw_hook = None
|
||||
|
||||
def _maybe_set_engine_callback(self):
|
||||
# This assumes no concurrent calls to backward
|
||||
if self._has_callback:
|
||||
return
|
||||
|
||||
for post_bw_callback in reversed(self._post_bw_callbacks_to_enqueue):
|
||||
torch.autograd.Variable._execution_engine.queue_callback(post_bw_callback)
|
||||
self._post_bw_callbacks_to_enqueue.clear()
|
||||
|
||||
def callback():
|
||||
self.parents = {"Global"}
|
||||
self._has_callback = False
|
||||
|
||||
torch.autograd.Variable._execution_engine.queue_callback(callback)
|
||||
self._has_callback = True
|
||||
|
||||
@property
|
||||
def is_bw(self):
|
||||
"""
|
||||
A boolean marking if this is currently running during the backward pass or not
|
||||
"""
|
||||
return torch._C._current_graph_task_id() != -1
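        # (A graph task id of -1 indicates the autograd engine is not currently
        # executing a backward graph task.)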
|
||||
|
||||
def get_known_fqn(self, mod):
|
||||
"""
|
||||
Return the fqn for the given module if it is known to the ``ModTracker``, otherwise ``None``.
|
||||
"""
|
||||
return self._known_modules.get(mod, None)
|
||||
|
||||
def register_user_hooks(
|
||||
self,
|
||||
pre_fw_hook: Optional[Callable] = None,
|
||||
post_fw_hook: Optional[Callable] = None,
|
||||
pre_bw_hook: Optional[Callable] = None,
|
||||
post_bw_hook: Optional[Callable] = None,
|
||||
):
|
||||
"""
|
||||
Registers user-specified hooks to be called before/after the forward/backward pass for each
|
||||
module tracked by the ``ModTracker``. One or more can be ``None``.
|
||||
Args:
|
||||
pre_fw_hook (Callable, optional): A hook to be called before the forward pass for the
|
||||
module. It should have the following signature:
|
||||
pre_fw_hook (module, input) -> None
|
||||
post_fw_hook (Callable, optional): A hook to be called after the forward pass for the
|
||||
module. It should have the following signature:
|
||||
post_fw_hook (module, input, output) -> None
|
||||
pre_bw_hook (Callable, optional): A multi-grad hook to be called on all the outputs of
|
||||
the module that require gradients. It should have the following signature:
|
||||
pre_bw_hook (module, grad_output) -> None
|
||||
post_bw_hook (Callable, optional): A multi-grad hook to be called on all the inputs of
|
||||
the module that require gradients. It should have the following signature:
|
||||
post_bw_hook (module, grad_input) -> None
|
||||
Raises:
|
||||
AssertionError: If a new hook is provided when one is already registered.
|
||||
Note:
|
||||
If the module is not alive during the backward pass, the pre_bw_hook and post_bw_hook will
|
||||
receive None as the module argument.
|
||||
The module fqn will be present in the ``parents`` attribute when each of the hooks is called.
|
||||
Hooks are intended to be used as markers only, not to modify the inputs/outputs.
|
||||
"""
|
||||
|
||||
def set_hook(hook, user_hook, hook_name):
|
||||
if hook is not None and user_hook is not None:
|
||||
raise AssertionError(
|
||||
f"Only one {hook_name} can be registered at a time"
|
||||
f" Clear the existing hook by calling ``clear_user_hooks`` before registering a new one"
|
||||
)
|
||||
return hook
|
||||
|
||||
self._user_pre_fw_hook = set_hook(
|
||||
pre_fw_hook, self._user_pre_fw_hook, "pre_fw_hook"
|
||||
)
|
||||
self._user_post_fw_hook = set_hook(
|
||||
post_fw_hook, self._user_post_fw_hook, "post_fw_hook"
|
||||
)
|
||||
self._user_pre_bw_hook = set_hook(
|
||||
pre_bw_hook, self._user_pre_bw_hook, "pre_bw_hook"
|
||||
)
|
||||
self._user_post_bw_hook = set_hook(
|
||||
post_bw_hook, self._user_post_bw_hook, "post_bw_hook"
|
||||
)
|
||||
|
||||
def clear_user_hooks(self):
|
||||
"""
|
||||
Clears the user specified hooks registered with ``register_user_hooks``
|
||||
"""
|
||||
self._user_pre_fw_hook = None
|
||||
self._user_post_fw_hook = None
|
||||
self._user_pre_bw_hook = None
|
||||
self._user_post_bw_hook = None
|
||||
|
||||
def _get_mod_name(self, mod):
|
||||
if mod not in self._known_modules:
|
||||
self._known_modules[mod] = type(mod).__name__
|
||||
mod_name = self._known_modules[mod]
|
||||
if mod not in self._seen_modules:
|
||||
for name, submod in mod.named_children():
|
||||
self._known_modules[submod] = f"{mod_name}.{name}"
|
||||
self._get_mod_name(submod)
|
||||
self._seen_modules.add(mod)
|
||||
return mod_name
|
||||
|
||||
def _get_append_fn(self, w_mod, name, is_bw):
|
||||
def fn(*args):
|
||||
if is_bw:
|
||||
self._maybe_set_engine_callback()
|
||||
if name in self.parents and not self.is_bw:
|
||||
|
||||
def custom_formatwarning(msg, category, filename, lineno, line=None):
|
||||
return f"{filename}:{lineno}: {category.__name__}: {msg} \n"
|
||||
|
||||
warnings.formatwarning = custom_formatwarning
|
||||
warnings.warn(
|
||||
"The module hierarchy tracking maybe be messed up."
|
||||
" Please file a bug to PyTorch, if it is the case."
|
||||
)
|
||||
if name not in self.parents:
|
||||
self._active_module_cnt[name] = 1
|
||||
self.parents.add(name)
|
||||
else:
|
||||
self._active_module_cnt[name] += 1
|
||||
|
||||
if self._user_pre_bw_hook is not None and is_bw:
|
||||
self._user_pre_bw_hook(w_mod(), args)
|
||||
|
||||
return fn
|
||||
|
||||
def _get_pop_fn(self, w_mod, name, is_bw):
|
||||
def fn(*args):
|
||||
if self._user_post_bw_hook is not None and is_bw:
|
||||
self._user_post_bw_hook(w_mod(), args)
|
||||
if name in self.parents:
|
||||
self._active_module_cnt[name] -= 1
|
||||
if self._active_module_cnt[name] == 0:
|
||||
self.parents.remove(name)
|
||||
elif not self.is_bw:
|
||||
# Due to some input/output not requiring gradients, we cannot enforce
|
||||
# proper nesting in backward
|
||||
raise RuntimeError(
|
||||
"The Module hierarchy tracking is wrong. Report a bug to PyTorch"
|
||||
)
|
||||
|
||||
return fn
|
||||
|
||||
def _fw_pre_hook(self, mod, input):
|
||||
name = self._get_mod_name(mod)
|
||||
w_mod = weakref.ref(mod)
|
||||
self._get_append_fn(w_mod, name, False)()
|
||||
if self._user_pre_fw_hook is not None:
|
||||
self._user_pre_fw_hook(mod, input)
|
||||
args, _ = tree_flatten(input)
|
||||
tensors = [a for a in args if isinstance(a, torch.Tensor) and a.requires_grad]
|
||||
if not self.is_bw:
|
||||
if tensors:
|
||||
register_multi_grad_hook(tensors, self._get_pop_fn(w_mod, name, True))
|
||||
else:
|
||||
self._post_bw_callbacks_to_enqueue.append(
|
||||
self._get_pop_fn(w_mod, name, True)
|
||||
)
|
||||
|
||||
def _fw_post_hook(self, mod, input, output):
|
||||
name = self._get_mod_name(mod)
|
||||
w_mod = weakref.ref(mod)
|
||||
if self._user_post_fw_hook is not None:
|
||||
self._user_post_fw_hook(mod, input, output)
|
||||
self._get_pop_fn(w_mod, name, False)()
|
||||
args, _ = tree_flatten(output)
|
||||
tensors = [a for a in args if isinstance(a, torch.Tensor) and a.requires_grad]
|
||||
if not self.is_bw and tensors:
|
||||
register_multi_grad_hook(
|
||||
tensors, self._get_append_fn(w_mod, name, True), mode="any"
|
||||
)
|
||||
|
||||
def __enter__(self):
|
||||
self._fw_pre_handle = register_module_forward_pre_hook(self._fw_pre_hook)
|
||||
self._fw_post_handle = register_module_forward_hook(
|
||||
self._fw_post_hook, always_call=True
|
||||
)
|
||||
return self
|
||||
|
||||
def __exit__(self, *args):
|
||||
self._fw_pre_handle.remove()
|
||||
self._fw_post_handle.remove()
|
|
@ -0,0 +1,527 @@
|
|||
# Owner(s): ["module: unknown"]
|
||||
import math
|
||||
import os
|
||||
from collections import defaultdict
|
||||
from typing import Any, Callable
|
||||
from typing_extensions import Self
|
||||
|
||||
import torch
|
||||
import torch.utils._pytree as pytree
|
||||
from torch._guards import active_fake_mode
|
||||
from torch._inductor.utils import get_device_tflops, get_gpu_dram_gbps
|
||||
from torch._subclasses.fake_tensor import FakeTensorMode
|
||||
from torch.distributed._tools.mod_tracker import ModTracker
|
||||
from torch.utils._mode_utils import no_dispatch
|
||||
from torch.utils._python_dispatch import TorchDispatchMode
|
||||
from torch.utils.flop_counter import flop_registry
|
||||
|
||||
|
||||
aten = torch.ops.aten
|
||||
|
||||
# This value is hard-coded here:
|
||||
# https://github.com/pytorch/pytorch/blob/5fba5d83f0703ff8077ab65448a998e9ad6598fd/c10/cuda/CUDACachingAllocator.cpp#L117
|
||||
_PYTORCH_MIN_ALLOCATE = (
|
||||
2**9 if int(os.environ.get("PYTORCH_NO_CUDA_MEMORY_CACHING", 0)) == 0 else 1
|
||||
)
|
||||
|
||||
# No fall-back kernel needed/exists for view ops
|
||||
_VIEW_OPS = {
|
||||
aten.lift_fresh,
|
||||
aten.t,
|
||||
aten.transpose,
|
||||
aten.view,
|
||||
aten.detach,
|
||||
aten._unsafe_view,
|
||||
aten.split,
|
||||
aten.adjoint,
|
||||
aten.as_strided,
|
||||
aten.diagonal,
|
||||
aten.expand,
|
||||
aten.expand_as,
|
||||
aten.movedim,
|
||||
aten.permute,
|
||||
aten.select,
|
||||
aten.squeeze,
|
||||
aten.mT,
|
||||
aten.mH,
|
||||
aten.real,
|
||||
aten.imag,
|
||||
aten.view_as,
|
||||
aten.unflatten,
|
||||
aten.unfold,
|
||||
aten.unbind,
|
||||
aten.unsqueeze,
|
||||
aten.vsplit,
|
||||
aten.hsplit,
|
||||
aten.split_with_sizes,
|
||||
aten.swapaxes,
|
||||
aten.swapdims,
|
||||
aten.chunk,
|
||||
}
|
||||
# We can ignore benchmarking tensor create ops
|
||||
_CREATE_OPS = {
|
||||
aten.randint,
|
||||
aten.randn,
|
||||
aten.rand,
|
||||
aten.randn_like,
|
||||
aten.rand_like,
|
||||
aten.randint_like,
|
||||
aten.arange,
|
||||
aten.ones_like,
|
||||
aten.zeros_like,
|
||||
}
|
||||
|
||||
_IGNORE_OPS = _VIEW_OPS | _CREATE_OPS
|
||||
|
||||
__all__ = ["RuntimeEstimator"]
|
||||
|
||||
|
||||
class RuntimeEstimator(TorchDispatchMode):
|
||||
"""
|
||||
Estimates the GPU runtime in milliseconds using various estimation methods under the ``FakeTensorMode``.
|
||||
|
||||
This class provides a ``TorchDispatchMode`` based context manager that can be used to estimate the eager
|
||||
runtime of PyTorch functions. It supports two estimation modes, benchmarking (`operator-level-benchmark`) and
|
||||
roofline cost modeling (`operator-level-cost-model`).
|
||||
For modules executed under this context manager, it aggregates the forward and backward operation runtimes
|
||||
and also records their execution orders.
|
||||
|
||||
Attributes:
|
||||
mod_runtimes (Dict[str, Dict[str, float]]): A dictionary of module runtimes. The key to the outer dictionary
|
||||
is the fully qualified name (FQN) of the module. For each module the forward and backward runtimes of the
|
||||
operations are aggregated in the inner dictionary keyed by 'fw' and 'bw'.
|
||||
mod_fw_pre_order (List[str]): List of module FQNs in pre-forward execution order.
|
||||
mod_bw_pre_order (List[str]): List of module FQNs in pre-backward execution order.
|
||||
mod_fw_post_order (List[str]): List of module FQNs in post-forward execution order.
|
||||
mod_bw_post_order (List[str]): List of module FQNs in post-backward execution order.
|
||||
total_runtime (float): The total estimated runtime in milliseconds.
|
||||
|
||||
Note:
|
||||
1) The benchmarking estimate mode will execute kernels on GPU and assumes that every operation can run in
|
||||
isolation without causing an OOM error. It is also designed to be used only under ``FakeTensorMode``.
|
||||
2) Currently wrapper tensor sub-classes such as ``DTensor`` won't produce correct estimates. We plan to support
|
||||
them in future PRs.
|
||||
3) We only estimate the compute time, if your code has communication, it will not be considered. Again, we will
|
||||
support this in future PRs.
|
||||
|
||||
Example usage:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
runtime_estimator = RuntimeEstimator()
|
||||
with FakeTensorMode():
|
||||
module = ...
|
||||
optimizer = ...
|
||||
inp = ...
|
||||
with runtime_estimator(estimate_mode_type="operator-level-cost-model"):
|
||||
loss = module(inp)
|
||||
loss.backward()
|
||||
optimizer.step()
|
||||
optimizer.zero_grad()
|
||||
runtime_estimator.display_modulewise_stats()
|
||||
"""
|
||||
|
||||
_float_types: set[torch.dtype] = {
|
||||
torch.float16,
|
||||
torch.bfloat16,
|
||||
torch.float32,
|
||||
torch.float64,
|
||||
}
|
||||
_no_fallback_kernel: set[torch._ops._OpNamespace] = set()
|
||||
fake_mode: FakeTensorMode
|
||||
|
||||
def __init__(self) -> None:
|
||||
super().__init__()
|
||||
self._estimate: Callable
|
||||
self._estimate_mode_type: str
|
||||
self._mod_tracker = ModTracker()
|
||||
self.mod_runtimes: dict[str, dict[str, float]] = defaultdict(
|
||||
lambda: defaultdict(lambda: 0.0)
|
||||
)
|
||||
self.mod_fw_pre_order: list[str] = []
|
||||
self.mod_bw_pre_order: list[str] = []
|
||||
self.mod_fw_post_order: list[str] = []
|
||||
self.mod_bw_post_order: list[str] = []
|
||||
self.total_runtime: float = 0.0
|
||||
|
||||
# Adapted from: https://github.com/pytorch/pytorch/blob/9b902b3ee3bd608a19543362b66bf06c373dd374/torch/_subclasses/fake_tensor.py#L1969 # noqa: PGH004,B950
|
||||
# NB: returns fake tensors
|
||||
@classmethod
|
||||
def _maybe_run_and_benchmark_fallback_kernel( # type: ignore[no-untyped-def]
|
||||
cls,
|
||||
func,
|
||||
args,
|
||||
kwargs,
|
||||
orig_not_implemented_exception,
|
||||
):
|
||||
"""
|
||||
Runs and benchmarks a fallback kernel for a given function.
|
||||
|
||||
Args:
|
||||
func (Callable): The function to benchmark.
|
||||
args (Tuple): The arguments to pass to the function.
|
||||
kwargs (Dict[str, Any]): The keyword arguments to pass to the function.
|
||||
orig_not_implemented_exception (Exception): The original exception to raise if the fallback kernel
|
||||
is not implemented.
|
||||
|
||||
Returns:
|
||||
Tuple[Any, float]: A tuple containing the result of the function and
|
||||
the mean operation time in milliseconds.
|
||||
"""
|
||||
# these should all be supported, just to be safe
|
||||
# avoid fallback for operators which inplace modify metadata
|
||||
# because the input fake tensors would be umodified
|
||||
if torch.Tag.inplace_view in func.tags: # type: ignore[attr-defined]
|
||||
raise orig_not_implemented_exception
|
||||
|
||||
inp_impls = {}
|
||||
flat_args, args_spec = pytree.tree_flatten((args, kwargs))
|
||||
# Don't use in_kernel_invocation_manager(fake_mode) as we want to do
|
||||
# REAL compute (not with meta device)
|
||||
with no_dispatch():
|
||||
|
||||
def to_real_tensor(e): # type: ignore[no-untyped-def]
|
||||
if cls.fake_mode.is_our_fake(e):
|
||||
if e.dtype in cls._float_types:
|
||||
out = torch.rand_like(e, device=e.fake_device)
|
||||
else:
|
||||
out = torch.ones_like(e, device=e.fake_device)
|
||||
if e.is_sparse:
|
||||
out._coalesced_(e.is_coalesced())
|
||||
inp_impls[id(out)] = e
|
||||
return out
|
||||
return e
|
||||
|
||||
flat_args = [to_real_tensor(a) for a in flat_args]
|
||||
args, kwargs = pytree.tree_unflatten(flat_args, args_spec)
|
||||
r = func(*args, **kwargs)
|
||||
warmup_iters, actual_iters = 2, 3
|
||||
for _ in range(warmup_iters):
|
||||
func(*args, **kwargs)
|
||||
start_event = torch.cuda.Event(enable_timing=True)
|
||||
end_event = torch.cuda.Event(enable_timing=True)
|
||||
start_event.record(torch.cuda.current_stream())
|
||||
for _ in range(actual_iters):
|
||||
func(*args, **kwargs)
|
||||
end_event.record(torch.cuda.current_stream())
|
||||
torch.cuda.synchronize()
|
||||
cuda_time = start_event.elapsed_time(end_event)
|
||||
mean_op_time = cuda_time / actual_iters
|
||||
|
||||
storages = set()
|
||||
|
||||
for e in flat_args:
|
||||
if isinstance(e, torch.Tensor):
|
||||
if not e.is_sparse:
|
||||
storages.add(e._typed_storage()._cdata)
|
||||
|
||||
# TODO: also check metadata change on inputs
|
||||
# proper aliasing/metadata relationship between outputs and inputs will
|
||||
# not be set up, bc of conversion to device, unless we can reuse an
|
||||
# input impl
|
||||
|
||||
def map_out(e): # type: ignore[no-untyped-def]
|
||||
if id(e) not in inp_impls and (
|
||||
isinstance(e, torch.Tensor)
|
||||
and not e.is_sparse
|
||||
and e._typed_storage()._cdata in storages
|
||||
):
|
||||
raise orig_not_implemented_exception
|
||||
|
||||
if isinstance(e, torch.Tensor):
|
||||
if id(e) in inp_impls:
|
||||
return inp_impls[id(e)]
|
||||
else:
|
||||
return cls.fake_mode.fake_tensor_converter.from_real_tensor(
|
||||
cls.fake_mode, e
|
||||
)
|
||||
else:
|
||||
return e
|
||||
|
||||
return (pytree.tree_map(map_out, r), mean_op_time)
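
    # For illustration (not in the original file): the benchmarking above uses the
    # standard CUDA-event timing pattern, which in isolation looks like this
    # (``op`` is a hypothetical callable launching the kernels to time):
    #
    #     start = torch.cuda.Event(enable_timing=True)
    #     end = torch.cuda.Event(enable_timing=True)
    #     start.record()
    #     op()
    #     end.record()
    #     torch.cuda.synchronize()            # wait for the recorded events
    #     elapsed_ms = start.elapsed_time(end)  # elapsed time in milliseconds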

    @classmethod
    def _benchmark_estimate(cls, func, args, kwargs) -> tuple[Any, float]:  # type: ignore[no-untyped-def]
        """
        Estimates the runtime of a function using benchmarking.

        Args:
            func: The function to estimate.
            args: The arguments to pass to the function.
            kwargs: The keyword arguments to pass to the function.

        Returns:
            Tuple[Any, float]: A tuple containing the result of the function and
                the mean operation time in milliseconds.
        """
        assert isinstance(cls.fake_mode, FakeTensorMode), (
            "Initialize/Assign FakeTensorMode before using this function"
        )
        mean_op_time = 0.0
        if func._overloadpacket not in _VIEW_OPS:
            try:
                res, mean_op_time = cls._maybe_run_and_benchmark_fallback_kernel(
                    func,
                    args,
                    kwargs,
                    NotImplementedError,
                )
                return (res, mean_op_time)
            except NotImplementedError:
                cls._no_fallback_kernel.add(func._overloadpacket)
        res = func(*args, **kwargs or {})
        return (res, mean_op_time)

    # Adapted from: https://github.com/pytorch/pytorch/blob/9b902b3ee3bd608a19543362b66bf06c373dd374/torch/_inductor/scheduler.py#L589  # noqa: PGH004,B950
    @classmethod
    def _roofline_estimate(cls, func, args, kwargs) -> tuple[Any, float]:  # type: ignore[no-untyped-def]
        """
        Estimates the runtime of a function using a roofline cost model.

        Args:
            func: The function to estimate.
            args: The arguments to pass to the function.
            kwargs: The keyword arguments to pass to the function.

        Returns:
            Tuple[Any, float]: A tuple containing the result of the function and
                the mean operation time in milliseconds.
        """
        assert torch.cuda.is_available(), (
            "Roofline estimation needs to access CUDA capabilities to make estimations"
        )

        def get_num_bytes(t: torch.Tensor) -> int:
            """
            Calculates the memory consumption of a tensor.

            Args:
                t (torch.Tensor): The input tensor.

            Returns:
                int: The memory consumption of the tensor in bytes.
            """
            num_bytes = t.untyped_storage().nbytes()
            mem_consumed = (
                math.ceil(num_bytes / _PYTORCH_MIN_ALLOCATE) * _PYTORCH_MIN_ALLOCATE
            )
            return mem_consumed

        def get_compute_time(func_packet, args, kwargs, out, out_dtypes) -> float:  # type: ignore[no-untyped-def]
            """
            Estimates the compute time of an aten operator.

            Args:
                func_packet: The operator overload packet.
                args: The arguments to the operator.
                kwargs: The keyword arguments to the operator.
                out: The output of the operator.
                out_dtypes: The output data types.

            Returns:
                float: The estimated compute time in nanoseconds.
            """
            if func_packet in flop_registry:
                assert len(out_dtypes) == 1, (
                    f"Only support single out dtype got {out_dtypes} for {func_packet}"
                )
                dtype = out_dtypes.pop()
                # This actually gives peta-FLOPs/s hence multiply by 1e15 to get the FLOPs/s
                peak_gpu_flops = get_device_tflops(dtype) * 1e15
                # We can expect to achieve 75% of theoretical peak flops
                factor = 0.75
                peak_empirical_flops = factor * peak_gpu_flops
                flop_count_func = flop_registry[func_packet]
                # We divide by a factor of 2 to get the MACs (multiply and accumulate)
                flop_count = flop_count_func(*args, **kwargs, out_val=out) / 2
                # We multiply by 1e9 to get the time in nano seconds
                compute_time = (flop_count / peak_empirical_flops) * 1e9
                return compute_time
            return 0.0
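
        # For illustration (not in the original file): ``flop_registry`` maps an
        # overload packet to a FLOP-counting function. For example, for aten.mm
        # with shapes (m, k) @ (k, n) the registered counter returns
        # 2 * m * n * k FLOPs (one multiply plus one add per inner-product term),
        # which the division by 2 above converts to MACs.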

        def get_transfer_time(flat_args_kwargs, flat_outs) -> float:  # type: ignore[no-untyped-def]
            """
            Estimates the memory transfer time of input and output tensors.

            Args:
                flat_args_kwargs (List[torch.Tensor]): The flat list of arguments and keyword arguments.
                flat_outs (List[torch.Tensor]): The flat list of outputs.

            Returns:
                float: The estimated memory transfer time in nanoseconds.
            """
            gpu_memory_bandwidth = get_gpu_dram_gbps()
            read_bytes = sum(
                get_num_bytes(t)
                for t in flat_args_kwargs
                if isinstance(t, torch.Tensor)
            )
            write_bytes = sum(
                get_num_bytes(t) for t in flat_outs if isinstance(t, torch.Tensor)
            )
            counted_bytes = read_bytes + write_bytes
            # The GPU memory bandwidth is in GB/s so the transfer time is in nanoseconds
            transfer_time = counted_bytes / gpu_memory_bandwidth
            return transfer_time

        # Roofline Cost Model Explanation
        #
        # The roofline cost model estimates the execution time of an operator based on
        # the device's empirical maximum FLOPs/sec (pi) and device DRAM bandwidth (beta).
        #
        # Variables:
        # - pi: Maximum empirical FLOPs/sec of the device
        # - beta: Maximum empirical device DRAM bandwidth (bytes/sec) of the device
        # - I: Arithmetic intensity of the operator (FLOPs/byte)
        # - op_flops: FLOPs required by the operator
        # - op_bytes: Bytes transferred to and from DRAM for the operator
        #
        # Calculation Steps:
        # 1. Calculate arithmetic intensity: I = op_flops / op_bytes
        # 2. Calculate estimated FLOPs/sec: est_flops_sec = min(pi, beta * I)
        # 3. Calculate estimated operator time: estimated_op_time = op_flops / est_flops_sec
        #    This simplifies to: estimated_op_time = max(op_flops / pi, op_flops / (beta * I))
        #    Further simplifying: estimated_op_time = max(op_flops / pi, op_bytes / beta)
        #
        # Simplified Formulas:
        # - compute_time = op_flops / pi
        # - transfer_time = op_bytes / beta
        # - estimated_op_time = max(compute_time, transfer_time)
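
        # Worked example (illustrative only; hypothetical, A100-like figures): for a
        # fp16 matmul of (4096, 4096) @ (4096, 4096) with pi ~ 312 TFLOPs/s and
        # beta ~ 1555 GB/s:
        #   op_flops ~ 2 * 4096**3 ~ 1.37e11; op_bytes ~ 3 * 4096**2 * 2 ~ 1.0e8
        #   compute_time ~ 1.37e11 / 3.12e14 ~ 0.44 ms
        #   transfer_time ~ 1.0e8 / 1.555e12 ~ 0.065 ms
        #   estimated_op_time = max(0.44, 0.065) ~ 0.44 ms, i.e. compute-bound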

        kwargs = kwargs if kwargs else {}
        out = func(*args, **kwargs)
        op_time = 0.0
        func_packet = func._overloadpacket
        if func_packet not in _IGNORE_OPS:
            flat_args_kwargs, args_spec = pytree.tree_flatten((args, kwargs))
            flat_outs, out_spec = pytree.tree_flatten(out)
            transfer_time = get_transfer_time(flat_args_kwargs, flat_outs)

            out_dtypes = {
                t.dtype
                for t in flat_outs
                if isinstance(t, torch.Tensor) and t.dtype in cls._float_types
            }

            args, kwargs = pytree.tree_unflatten(flat_args_kwargs, args_spec)
            out = pytree.tree_unflatten(flat_outs, out_spec)

            compute_time = get_compute_time(func_packet, args, kwargs, out, out_dtypes)
            # We get the estimated time as the max of the transfer time and
            # compute time. We divide by 1e6 to get the time in ms
            op_time = max(transfer_time, compute_time) / 1e6

        return (out, op_time)

    def display_modulewise_stats(self, depth: int = 2) -> None:
        """
        Displays module-wise statistics collected by ``RuntimeEstimator``.

        Prints the pre-forward and pre-backward execution orders.
        Displays the module-wise forward and backward runtimes in milliseconds.

        Args:
            depth (int): The maximum depth of module hierarchy to display (defaults to 2).
        """
        print("Pre-Forward Execution Order: ")
        for mod_fqn in self.mod_fw_pre_order:
            mod_depth = mod_fqn.count(".") + 1
            if mod_depth > depth:
                continue
            print(mod_fqn)
        print("Pre-Backward Execution Order: ")
        for mod_fqn in self.mod_bw_pre_order:
            mod_depth = mod_fqn.count(".") + 1
            if mod_depth > depth:
                continue
            print(mod_fqn)
        for mod_fqn, runtimes in self.mod_runtimes.items():
            mod_depth = mod_fqn.count(".") + 1
            if mod_depth > depth:
                continue
            print(
                f"{mod_fqn} fw: {runtimes.get('fw', 0.0):.3f}ms bw: {runtimes.get('bw', 0.0):.3f}ms"
            )

    def __torch_dispatch__(self, func, types, args=..., kwargs=None):  # type: ignore[no-untyped-def]
        # TODO: @sanketpurandare: Flatten tensors by desugaring the tensor subclasses
        # TODO: @sanketpurandare: Add logic for incorporating communication time
        res, op_time = self._estimate(func, args, kwargs)
        for par in self._mod_tracker.parents:
            if self._mod_tracker.is_bw:
                self.mod_runtimes[par]["bw"] += op_time
            else:
                self.mod_runtimes[par]["fw"] += op_time
        self.total_runtime += op_time
        return res
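
    # For illustration (not in the original file): if an op runs inside
    # ``model.layer1``, ``ModTracker.parents`` typically contains
    # {"Global", "model", "model.layer1"}, so the op's estimated time is added
    # to the 'fw' (or 'bw') bucket of every enclosing module as well as to the
    # total runtime.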

    def __call__(self, estimate_mode_type: str) -> Self:
        """
        Sets the estimate mode type.

        Currently supported modes:
            - "operator-level-benchmark": Estimates runtime using operator benchmarking.
            - "operator-level-cost-model": Estimates runtime using roofline cost model.

        Args:
            estimate_mode_type (str): The type of estimate mode to use.

        Returns:
            RuntimeEstimator: The runtime estimator instance.

        Raises:
            NotImplementedError: If the estimate mode type is not supported.
        """
        if estimate_mode_type == "operator-level-benchmark":
            self._estimate = RuntimeEstimator._benchmark_estimate
        elif estimate_mode_type == "operator-level-cost-model":
            self._estimate = RuntimeEstimator._roofline_estimate
        else:
            raise NotImplementedError(
                f"estimate_mode_type {estimate_mode_type} not supported"
            )
        self._estimate_mode_type = estimate_mode_type
        return self

    def __enter__(self) -> Self:
        fake_mode = active_fake_mode()
        assert isinstance(fake_mode, FakeTensorMode), (
            "No FakeTensorMode found; this mode is designed to be used under FakeTensorMode"
        )
        RuntimeEstimator.fake_mode = fake_mode
        self.total_runtime = 0.0
        self.mod_runtimes = defaultdict(lambda: defaultdict(lambda: 0.0))
        self.mod_fw_pre_order.clear()
        self.mod_bw_pre_order.clear()
        self.mod_fw_post_order.clear()
        self.mod_bw_post_order.clear()
        self._mod_tracker.register_user_hooks(
            pre_fw_hook=lambda mod, inp: self.mod_fw_pre_order.append(
                self._mod_tracker.get_known_fqn(mod)
            ),
            pre_bw_hook=lambda mod, g_out: self.mod_bw_pre_order.append(
                self._mod_tracker.get_known_fqn(mod)
            ),
            post_fw_hook=lambda mod, inp, out: self.mod_fw_post_order.append(
                self._mod_tracker.get_known_fqn(mod)
            ),
            post_bw_hook=lambda mod, g_inp: self.mod_bw_post_order.append(
                self._mod_tracker.get_known_fqn(mod)
            ),
        )
        self._mod_tracker.__enter__()
        super().__enter__()
        return self

    def __exit__(self, *args: Any) -> None:
        print(
            f"Estimated ({self._estimate_mode_type}) "
            f"total_time: {self.total_runtime:.3f} ms"
        )
        if len(self._no_fallback_kernel) > 0:
            print("no_fallback_kernel: ", list(self._no_fallback_kernel))
        super().__exit__(*args)
        self._mod_tracker.clear_user_hooks()
        self._mod_tracker.__exit__()

962
venv/Lib/site-packages/torch/distributed/_tools/sac_estimator.py
Normal file
@ -0,0 +1,962 @@
import math
import os
import sys
from collections import OrderedDict
from dataclasses import astuple, dataclass
from typing import Any, NamedTuple, Optional
from typing_extensions import Self

import torch
from torch import nan, nn, UntypedStorage
from torch._guards import active_fake_mode
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.distributed._tools.common_utils import get_untyped_storages
from torch.distributed._tools.mod_tracker import ModTracker
from torch.distributed._tools.runtime_estimator import RuntimeEstimator
from torch.testing._internal.composite_compliance import (
    is_inplace,
    is_inplace_view_fn,
    is_view_fn,
)
from torch.utils._python_dispatch import TorchDispatchMode
from torch.utils._pytree import tree_flatten
from torch.utils.checkpoint import SAC_IGNORED_OPS


__all__ = ["SACEstimator", "SACStats", "MSPS", "SACTradeOffStats", "SACGreedyOrderMeta"]
aten = torch.ops.aten

_ADDITIONAL_IGNORED_OPS = {
    aten.lift_fresh.default,  # type: ignore[attr-defined]
    torch.ops.profiler._record_function_exit._RecordFunction,  # type: ignore[attr-defined]
    aten.clone.default,  # type: ignore[attr-defined] # seems needed for torch.compile
}
OPS_TO_ALWAYS_SKIP = SAC_IGNORED_OPS | _ADDITIONAL_IGNORED_OPS
# This value is hard-coded here:
# https://github.com/pytorch/pytorch/blob/5fba5d83f0703ff8077ab65448a998e9ad6598fd/c10/cuda/CUDACachingAllocator.cpp#L117
_PYTORCH_MIN_ALLOCATE = (
    2**9 if int(os.environ.get("PYTORCH_NO_CUDA_MEMORY_CACHING", 0)) == 0 else 1
)


def _display_stats_tabular(headers: list[str], table_data: list[list[Any]]) -> None:
    try:
        from tabulate import tabulate
    except ImportError as err:
        raise ImportError("Please install tabulate.") from err

    # Use tabulate to print the table
    print(tabulate(table_data, headers=headers, tablefmt="rst"))
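

# For illustration (not in the original file): ``tabulate`` with
# ``tablefmt="rst"`` renders reStructuredText-style simple tables, roughly:
#
#     =====  =====
#     Op        ms
#     =====  =====
#     mm      0.44
#     =====  =====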


# Based on:
# https://github.com/fairinternal/xformers/blob/0ded5697a2ea15711ce45131002d04e72053cc6d/xformers/checkpoint.py#L62
@dataclass
class _SACMetadata:
    """
    Stores metadata for a single operator for SAC.

    Attributes:
        func (Any): The operator function.
        time_taken (float): The time taken by the operator.
        memory_used (float): The memory used by the operator.
        curr_idx (int): The current operator index.
        output_ids (Tuple[int, ...]): The storage IDs of the operator's outputs.
        inplace_info (Tuple[int, ...]): Tuple of self and parent operator for in-place operator.
        is_view_like (bool): Whether the operator is view-like.
        is_rand_op (bool): Whether the operator is a random operator.
    """

    func: Any
    time_taken: float
    memory_used: float
    curr_idx: int
    output_ids: tuple[int, ...]
    inplace_info: tuple[int, ...]
    is_view_like: bool
    is_rand_op: bool


@dataclass
class _SACModMetadata:
    """
    Stores metadata for a module for SAC.

    Attributes:
        start_idx (int): The starting index of the module's operators.
        force_store_random (bool): Whether to force store random operators in the module.
        sac_metadata (List[_SACMetadata]): List of metadata for each operator in the module.
    """

    start_idx: int
    force_store_random: bool
    sac_metadata: list[_SACMetadata]


@dataclass
class SACStats:
    """
    A class for storing Activation Checkpointing statistics corresponding to a module.

    Attributes:
        func_names (List[str]): List of operator names.
        runtimes (List[float]): List of operator runtimes in milliseconds.
        memory (List[int]): List of operator memory usage in bytes.
        view_like_ops (List[int]): Indices of view-like operators.
        rand_ops (List[int]): Indices of random operators.
        saved_autograd_ops (List[int]): Indices of operator results saved by the autograd engine.
        inplace_ops (List[Tuple[int, int]]): Tuple of indices of op and its first parent for in-place operators.
        force_store_random (bool): Whether to force store random operator results.
    """

    func_names: list[str]
    runtimes: list[float]
    memory: list[int]
    view_like_ops: list[int]
    rand_ops: list[int]
    saved_autograd_ops: list[int]
    inplace_ops: list[tuple[int, int]]
    force_store_random: bool


class MSPS(NamedTuple):
    """
    Represents memory and runtime statistics for an operator/operator group.

    Attributes:
        func_names (set[str]): Set of operator/operator group names.
        op_idx (int): Operator index (group head index in case of operator groups).
        memory (int): Memory usage in bytes.
        runtime (float): Runtime in milliseconds.
        msps (float): Memory saved per unit of recomputation time, calculated as memory/runtime.
    """

    func_names: set[str]
    op_idx: int
    memory: int
    runtime: float
    msps: float


@dataclass
class SACTradeOffStats:
    """
    Stores statistics for the activation-checkpointing trade-off.

    Attributes:
        n_segments (int): Number of piecewise linear segments fitted to the trade-off curve.
        slopes (List[float]): Slopes of the pieces of linear segments fitted to the trade-off curve.
        intercepts (List[float]): Intercepts of the pieces of linear segments fitted to the trade-off curve.
        fit_breaks (List[float]): Breakpoints of the pieces of linear segments fitted to the trade-off curve.
        tradeoff_curve (OrderedDict[float, float]): Trade-off curve data of memory discarded vs recomputation time.
        sac_memory (int): Total memory of operations available for activation checkpointing in bytes.
        sac_runtime (float): Total runtime of operations available for activation checkpointing in milliseconds.
    """

    n_segments: int
    slopes: list[float]
    intercepts: list[float]
    fit_breaks: list[float]
    tradeoff_curve: OrderedDict[float, float]
    sac_memory: int
    sac_runtime: float


@dataclass
class SACGreedyOrderMeta:
    """
    Stores metadata for greedy-order SAC.

    Attributes:
        recomputed_ops (set[int]): Set of operator indices to be recomputed.
        stored_ops (set[int]): Set of operator indices to be stored.
        inplace_op_groups (dict[int, set[int]]): Dictionary of in-place operator groups from group-head to operators.
        random_ops_group (dict[int, set[int]]): Dictionary of random op group head to random ops.
        msps_meta (list[MSPS]): List of memory and runtime statistics for operators.
    """

    recomputed_ops: set[int]
    stored_ops: set[int]
    inplace_op_groups: dict[int, set[int]]
    random_ops_group: dict[int, set[int]]
    msps_meta: list[MSPS]


class SACEstimator(TorchDispatchMode):
    """
    Estimates the memory and recomputation time trade-offs for applying Selective Activation Checkpointing (SAC).

    This class provides a ``TorchDispatchMode`` based context manager that can be used to estimate the memory and
    runtime trade-offs of functions or ``torch.nn.Module``s for Selective Activation Checkpointing (SAC). It provides
    detailed statistics and metadata information for operators of each module and provides a greedy order for selecting
    the operators to be recomputed/checkpointed. It also constructs the per-module trade-off graph of discarded memory
    vs recomputation time for the obtained greedy order. Using ``RuntimeEstimator`` under the hood, it supports two
    estimation modes, `operator-level-benchmark` and `operator-level-cost-model` (roofline model).

    Attributes:
        sac_mod_stats (Dict[str, SACStats]): Dictionary from module FQN (fully qualified name) to ``SACStats``.
        sac_mod_tradeoff_stats (Dict[str, SACTradeOffStats]): Dictionary from module FQN to ``SACTradeOffStats``.
        sac_mod_greedy_order_meta (Dict[str, SACGreedyOrderMeta]): Dictionary from module FQN to ``SACGreedyOrderMeta``.

    Note:
        1) This class is designed to be used under ``FakeTensorMode``.
        2) Currently, it only supports estimation of compute time and memory usage, and does not consider communication.

    Example usage:

        .. code-block:: python

            sac_estimator = SACEstimator()
            with FakeTensorMode():
                module = ...
                inp = ...
                with sac_estimator("operator-level-cost-model"):
                    output = module(inp)
            sac_estimator.display_modulewise_sac_stats(depth=4, print_tabular=True)
    """

    def __init__(self) -> None:
        self.sac_mod_stats: dict[str, SACStats] = {}
        self.sac_mod_tradeoff_stats: dict[str, SACTradeOffStats] = {}
        self.sac_mod_greedy_order_meta: dict[str, SACGreedyOrderMeta] = {}
        self._mod_tracker = ModTracker()
        self._sac_metadata: list[_SACMetadata] = []
        self._sac_mod_metadata: dict[str, _SACModMetadata] = {}
        self._leaf_modules: set[str] = set()
        self._saved_tensor_hook_ctx = torch.autograd.graph.saved_tensors_hooks(
            self._pack_hook, lambda x: x
        )
        self._saved_tensor_ids: set[int] = set()
        self._estimate_runtime = RuntimeEstimator._roofline_estimate

    def _pack_hook(self, x: torch.Tensor) -> torch.Tensor:
        # Hook function to track underlying storage IDs of tensors
        # Updates the _saved_tensor_ids set with the IDs of the tensor's storages
        # Used in conjunction with torch.autograd.graph.saved_tensors_hooks
        untyped_storages = get_untyped_storages(x)
        storage_ids = (hash(st) for st in untyped_storages)
        self._saved_tensor_ids.update(storage_ids)
        return x
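
    # For illustration (not in the original file): ``saved_tensors_hooks`` calls
    # the pack hook on every tensor autograd saves for backward, and the unpack
    # hook when it is retrieved. An identity pair like the one used above only
    # observes tensors without altering them:
    #
    #     with torch.autograd.graph.saved_tensors_hooks(lambda t: t, lambda t: t):
    #         y = x * x  # x passes through the pack hook when saved for backward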

    def _pre_fw_hook(self, mod: nn.Module, inputs: Any) -> None:
        # Pre-forward hook function to prepare module metadata
        # Tracks module FQN, force store random flag, and ``SACModMetadata``
        # Initializes metadata for non-leaf modules, marks leaf modules
        mod_fqn = self._mod_tracker.get_known_fqn(mod)
        assert mod_fqn is not None
        num_children = sum(1 for _ in mod.children())
        if num_children > 0:
            force_store_random = self._get_force_store_random(inputs)
            self._sac_mod_metadata[mod_fqn] = _SACModMetadata(
                start_idx=len(self._sac_metadata),
                force_store_random=force_store_random,
                sac_metadata=[],
            )
        else:
            self._leaf_modules.add(mod_fqn)

    def _post_fw_hook(self, mod: nn.Module, inputs: Any, outputs: Any) -> None:
        # 1. Retrieves the module's FQN and checks if it's a leaf module
        # 2. If not a leaf module, computes:
        #    - ``SACStats`` using the module's metadata and force store random flag
        #    - ``SACGreedyOrderMeta`` using the computed SAC statistics
        mod_fqn = self._mod_tracker.get_known_fqn(mod)
        assert mod_fqn is not None
        if mod_fqn in self._leaf_modules:
            return
        else:
            self.sac_mod_stats[mod_fqn] = self._get_sac_stats(
                data=self._sac_mod_metadata[mod_fqn].sac_metadata,
                force_store_random=self._sac_mod_metadata[mod_fqn].force_store_random,
            )
            self.sac_mod_greedy_order_meta[mod_fqn] = self._get_greedy_order_meta(
                self.sac_mod_stats[mod_fqn]
            )

    def _get_force_store_random(self, inputs: Any) -> bool:
        flat_inputs, _ = tree_flatten(inputs)
        return all(not isinstance(x, torch.Tensor) for x in flat_inputs)

    def _get_sac_stats(
        self, data: list[_SACMetadata], force_store_random: bool
    ) -> SACStats:
        # 1. Ignore the operations that should be skipped by SAC such as aten.detach.default because autograd
        #    inserts those during backward and it breaks the fwd-bwd alignment
        filtered_data = [x for x in data if x.func not in OPS_TO_ALWAYS_SKIP]

        (
            ops,
            runtimes_,
            memory_,
            new_ids,
            output_ids,
            inplace_ops_,
            view_like_ops_,
            rand_ops_,
        ) = zip(*[astuple(x) for x in filtered_data], strict=True)

        # 2. Extract the metadata information
        runtimes = list(runtimes_)
        memory = list(memory_)
        func_names = [op._overloadpacket.__name__ for op in ops]
        view_like_ops = [i for i, x in enumerate(view_like_ops_) if x]
        rand_ops = [i for i, x in enumerate(rand_ops_) if x]
        saved_autograd_ops = [
            i
            for i, out_ids in enumerate(output_ids)
            if set(out_ids).issubset(self._saved_tensor_ids)
        ]

        # 3. Remap the inplace indices as we have removed OPS_TO_ALWAYS_SKIP
        # FIXME @sanketpurandare: Fix this by changing the parent of the inplace-op
        # to itself if the original parent is in OPS_TO_ALWAYS_SKIP.
        try:
            inplace_ops = [tuple(map(new_ids.index, x)) for x in inplace_ops_ if x]
        except ValueError as err:
            raise ValueError(
                f"The remapping of inplace ops failed since one of the inplace op parents"
                f" must have been present in {OPS_TO_ALWAYS_SKIP}"
            ) from err

        # 4. The last operation is always stored as the output of the checkpoint
        #    block, so we can avoid recomputing it. We set the memory to zero
        #    instead of adding a new constraint because we want both the 0 and 1
        #    endpoints for memory_budget to be valid
        # FIXME @sanketpurandare: this heuristic for finding the last non-view non-inplace op
        # might not always be correct, which would yield suboptimal policies
        last_op = len(ops) - 1
        skip_ops_ = set(view_like_ops) | set({x[0] for x in inplace_ops})
        reversed_skip_ops = sorted(skip_ops_, reverse=True)
        for op in reversed_skip_ops:
            if op == last_op:
                last_op -= 1

        memory[last_op] = 0

        # 5. Create a single ``SACStats`` object for the entire block of ``_SACMetadata``.
        return SACStats(
            func_names=func_names,
            runtimes=runtimes,
            memory=memory,
            view_like_ops=view_like_ops,
            rand_ops=rand_ops,
            saved_autograd_ops=saved_autograd_ops,
            inplace_ops=inplace_ops,  # type: ignore[arg-type]
            force_store_random=force_store_random,
        )

    def _get_inplace_metadata(
        self, func: Any, out_storages: set[UntypedStorage]
    ) -> tuple[int, tuple[int, ...], dict[str, tuple[int, ...]]]:
        # 1. Get the current index of the metadata obtained so far
        curr_idx = len(self._sac_metadata)
        # 2. Get the set of active modules that are not leaf
        active_mod_fqns: set[str] = {
            par for par in self._mod_tracker.parents if par not in self._leaf_modules
        }
        # 3. Output ids are the identifiers of the storage objects corresponding to the tensors
        output_ids = tuple(hash(st) for st in out_storages)
        # 4. If the function is not in-place, return
        if not is_inplace(func):
            return curr_idx, output_ids, {mod_fqn: () for mod_fqn in active_mod_fqns}

        op_idx = curr_idx
        # 5. Initialize the parent op ids of the in-place op for each of the active modules
        mod_op_parent_idxs: dict[str, int] = {
            mod_fqn: -1 for mod_fqn in active_mod_fqns
        }
        for i, d in enumerate(self._sac_metadata):
            # 6. Find the first occurrence of a tensor corresponding to each module that
            #    shares the same storage as the current tensor
            past_output_ids = d.output_ids
            if set(output_ids).issubset(set(past_output_ids)):
                for mod_fqn, op_parent_idx in mod_op_parent_idxs.items():
                    if op_parent_idx == -1:
                        if acm_stats := self._sac_mod_metadata.get(mod_fqn, None):
                            if i >= acm_stats.start_idx:
                                mod_op_parent_idxs[mod_fqn] = i
                        else:
                            assert mod_fqn == "Global"
                            mod_op_parent_idxs[mod_fqn] = i
        # 7. If no parent tensor is found, then it's probably an in-place op on the arguments
        #    so one can just store the current-op idx as parent idx
        for mod_fqn, op_parent_idx in mod_op_parent_idxs.items():
            if op_parent_idx < 0:
                mod_op_parent_idxs[mod_fqn] = op_idx
        mod_inplace_info = {
            mod_fqn: (op_idx, mod_op_parent_idxs[mod_fqn])
            for mod_fqn in active_mod_fqns
        }
        return curr_idx, output_ids, mod_inplace_info  # type: ignore[return-value]

    def __torch_dispatch__(  # type: ignore[no-untyped-def]
        self, func, types, args=..., kwargs=None
    ):
        # 1. Get the runtime estimate
        out, op_time = self._estimate_runtime(func, args, kwargs)
        flat_outs, _ = tree_flatten(out)
        out_storages_cuda: set[UntypedStorage] = set()
        out_storages_cpu: set[UntypedStorage] = set()
        cuda_devices: set[torch.device] = set()
        for o in flat_outs:
            if isinstance(o, torch.Tensor):
                if o.device.type == "cuda":
                    out_storages_cuda.update(get_untyped_storages(o))
                    cuda_devices.add(o.device)
                else:
                    out_storages_cpu.update(get_untyped_storages(o))

        # Check if there's more than 1 CUDA device
        assert len(cuda_devices) <= 1, (
            f"{func.__name__}'s output has more than 1 CUDA device {cuda_devices}"
        )

        # 2. Get the memory consumed by output
        nbytes_cuda = sum(
            math.ceil(st.nbytes() / _PYTORCH_MIN_ALLOCATE) * _PYTORCH_MIN_ALLOCATE
            for st in out_storages_cuda
        )
        nbytes_cpu = sum(st.nbytes() for st in out_storages_cpu)
        nbytes = nbytes_cuda + nbytes_cpu
        # 3. Get the current operator index, output storage identifiers and in-place metadata
        out_storages = out_storages_cuda | out_storages_cpu
        curr_idx, output_ids, mod_inplace_info = self._get_inplace_metadata(
            func, out_storages
        )
        # 4. Determine if the function is in-place, a random op, or view-like
        is_view_like = is_view_fn(func) or is_inplace_view_fn(func)
        is_rand_op = torch.Tag.nondeterministic_seeded in func.tags
        if is_view_like:
            nbytes = 0
        # sdpa has non-deterministic seed, but might be deterministic
        # if no dropout is applied
        if func.overloadpacket.__name__ == "_scaled_dot_product_flash_attention":
            is_rand_op = kwargs.get("dropout_p", 0) != 0
        # 5. Create metadata information per active non-leaf module
        for mod_fqn in self._mod_tracker.parents:
            if mod_fqn in self._leaf_modules:
                continue
            acm = _SACMetadata(
                func=func,
                time_taken=op_time,
                memory_used=nbytes,
                curr_idx=curr_idx,
                output_ids=output_ids,
                inplace_info=mod_inplace_info[mod_fqn],
                is_view_like=is_view_like,
                is_rand_op=is_rand_op,
            )
            if acm_stats := self._sac_mod_metadata.get(mod_fqn, None):
                acm_stats.sac_metadata.append(acm)
            else:
                assert mod_fqn == "Global", (
                    f"Module {mod_fqn} not found in AC Mod Stats"
                )
                self._sac_metadata.append(acm)

        return out

    def _get_greedy_order_meta(self, sac_stats: SACStats) -> SACGreedyOrderMeta:
        # An in-place op group is a set of in-place ops that operate on the same underlying tensor storage.
        # 1. inplace_op_groups: A dictionary from the top-most parent of in-place ops to the in-place ops in the group.
        #    The top-most op can itself be an in-place op or can be a non-inplace op.
        # 2. inplace_op_to_group_head: A dictionary that maps all the in-place ops to their respective group heads.
        inplace_op_groups: dict[int, set[int]] = {}
        inplace_op_to_group_head: dict[int, int] = dict(sac_stats.inplace_ops)

        # Initialize inplace_op_groups using inplace_op_to_group_head
        for op_idx, group_head_idx in inplace_op_to_group_head.items():
            op_group = inplace_op_groups.setdefault(group_head_idx, {group_head_idx})
            op_group.add(op_idx)

        # Like in-place ops, all of the random ops in the function/module should either all be recomputed or
        # all be saved as a group, because they affect the random seed generator. If force_store_random is set,
        # all of the random ops will be stored by default. For ease of manageability, we store the top-most
        # random op as the leader of the random_ops_group.
        random_ops_group: dict[int, set[int]] = {}
        random_group_head_idx = min(sac_stats.rand_ops, default=-1)
        has_rand_ops = bool(sac_stats.rand_ops)
        if has_rand_ops:
            random_ops_group[random_group_head_idx] = set(sac_stats.rand_ops)

        # 1. Random ops are stored if force_store_random is set
        # 2. View-like ops are recomputed by default
        # 3. For inplace_op_groups:
        #    a) If the head of this group is an in-place op, then we have to store the entire group.
        #    b) If any op in the group is random and force_store_random is set, then the entire group will be stored.
        #    c) If none of the ops in the group are random and the head of the group is not an in-place op, then
        #       this group can be considered for recomputation in its entirety.
        stored_ops: set[int] = set()
        recomputed_ops: set[int] = set()
        # Case 1:
        if has_rand_ops and sac_stats.force_store_random:
            stored_ops.add(random_group_head_idx)
        # Case 2:
        recomputed_ops.update(set(sac_stats.view_like_ops))

        for group_head_idx, op_group in inplace_op_groups.items():
            # Case 3a:
            if group_head_idx in inplace_op_to_group_head:
                stored_ops.add(group_head_idx)
            # Case 3b:
            if (
                sac_stats.force_store_random
                and len(op_group & set(sac_stats.rand_ops)) > 0
            ):
                stored_ops.add(group_head_idx)

        # The potential recompute candidates are populated as:
        recompute_candidates: set[int] = set()
        # 1) The random group head if it is not stored
        if has_rand_ops and random_group_head_idx not in stored_ops:
            recompute_candidates.add(random_group_head_idx)
        # 2) The in-place op group heads that are not stored
        recompute_candidates.update(set(inplace_op_groups.keys()) - stored_ops)
        # 3) The non-inplace and non-random ops that are neither stored nor recomputed by default
        recompute_candidates.update(
            set(range(len(sac_stats.memory)))
            - recomputed_ops
            - stored_ops
            - set(inplace_op_to_group_head.keys())
            - set(sac_stats.rand_ops)
        )

        # We define msps for a recompute candidate as the ratio memory/runtime, i.e. memory savings per unit time
        msps_meta: list[MSPS] = []
        for cand_idx in recompute_candidates:
            op_indices = {cand_idx}
            if cand_idx in inplace_op_groups:
                op_indices.update(inplace_op_groups[cand_idx])
            if has_rand_ops and cand_idx == random_group_head_idx:
                op_indices.update(sac_stats.rand_ops)

            mem = sum(sac_stats.memory[op_idx] for op_idx in op_indices)
            runtime = sum(sac_stats.runtimes[op_idx] for op_idx in op_indices)
            func_names = {sac_stats.func_names[op_idx] for op_idx in op_indices}
            msps = (mem / runtime) if runtime > 0 else sys.float_info.max
            msps_meta.append(MSPS(func_names, cand_idx, mem, runtime, msps))
        # We choose candidates to be recomputed based on increasing msps
        msps_meta.sort(key=lambda x: x.msps, reverse=True)
        return SACGreedyOrderMeta(
            recomputed_ops, stored_ops, inplace_op_groups, random_ops_group, msps_meta
        )
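
    # For illustration (not in the original file): suppose three candidates with
    # (memory, runtime) of (1000 B, 0.1 ms), (4000 B, 0.1 ms) and (500 B, 0.5 ms).
    # Their msps values are 10000, 40000 and 1000 B/ms; sorting in decreasing
    # order of msps places the 4000 B op first, i.e. the greedy order prefers
    # ops that free the most memory per millisecond of recomputation.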

    def _get_sac_tradeoff_pwlf_stats(
        self,
        sac_stats: SACStats,
        greedy_order_meta: SACGreedyOrderMeta,
        n_segments: int = 2,
        save_tradeoff_graph: bool = False,
        filename: str = "ac_tradeoff",
    ) -> SACTradeOffStats:
        try:
            import numpy as np  # type: ignore[import-not-found]
            import pwlf  # type: ignore[import-untyped, import-not-found]
        except ImportError as err:
            raise ImportError("Please install pwlf and numpy package.") from err

        stored_ops, recomputed_ops, inplace_op_groups, random_ops_group, msps_meta = (
            greedy_order_meta.stored_ops,
            greedy_order_meta.recomputed_ops,
            greedy_order_meta.inplace_op_groups,
            greedy_order_meta.random_ops_group,
            greedy_order_meta.msps_meta,
        )
        # 1. Initialize the discarded memory and recomputation runtime to the sum over the already chosen recomputed_ops
        recomp_indices: set[int] = set()
        for r_idx in recomputed_ops:
            recomp_indices.add(r_idx)
            if r_idx in inplace_op_groups:
                recomp_indices.update(inplace_op_groups[r_idx])
            if r_idx in random_ops_group:
                recomp_indices.update(random_ops_group[r_idx])

        discarded_mem = sum(sac_stats.memory[op_idx] for op_idx in recomp_indices)
        recomp_runtime = sum(sac_stats.runtimes[op_idx] for op_idx in recomp_indices)
        # 2. Initialize the max recomputation time and total recomputation memory
        sac_runtime = sum(sac_stats.runtimes)
        sac_memory = sum(sac_stats.memory)
        # 3. The trade-off curve stores the KV pairs of discarded memory to total memory and
        #    recomputation time to total runtime incurred.
        delta = 1e-2
        tradeoff_curve = OrderedDict()
        # 4. Initialize the trade-off curve with the stats of the already chosen recomputed_ops
        tradeoff_curve[(discarded_mem / sac_memory) + delta] = (
            recomp_runtime / sac_runtime
        )
        # 5. Update the trade-off curve with memory and runtime stats of SAC candidates in the
        #    greedy order of their ``MSPS``.
        for cand in msps_meta:
            discarded_mem += cand.memory
            recomp_runtime += cand.runtime
            tradeoff_curve[(discarded_mem / sac_memory) + delta] = (
                recomp_runtime / sac_runtime
            )
        # 6. Finally, we add the memory and recomputation time of the always stored ops.
        stored_indices: set[int] = set()
        for s_idx in stored_ops:
            stored_indices.add(s_idx)
            if s_idx in inplace_op_groups:
                stored_indices.update(inplace_op_groups[s_idx])
            if s_idx in random_ops_group:
                stored_indices.update(random_ops_group[s_idx])
        discarded_mem += sum(sac_stats.memory[op_idx] for op_idx in stored_indices)
        recomp_runtime += sum(sac_stats.runtimes[op_idx] for op_idx in stored_indices)
        tradeoff_curve[(discarded_mem / sac_memory) + delta] = (
            recomp_runtime / sac_runtime
        )
        x_ = list(tradeoff_curve.keys())
        y_ = list(tradeoff_curve.values())
        # 7. We shift the y values to the left and the x values to the right to upper-bound the trade-off function
        # TODO: Write a better explanation why this needs to be done
        x = x_[: len(x_) - 1]
        y = y_[1:]
        tradeoff_pwlf = pwlf.PiecewiseLinFit(x, y)
        # 8. Fit a piecewise linear function with the specified number of segments to the trade-off curve.
        n_segments = max(min(len(x) - 2, n_segments), 1)
        tradeoff_pwlf.fit(n_segments=n_segments)

        # save prediction graph
        def save_prediction_graph(
            pwlf_: pwlf.PiecewiseLinFit, x: list[float], y: list[float], filename: str
        ) -> None:
            try:
                import matplotlib.pyplot as plt  # type: ignore[import-not-found]
                import numpy as np  # type: ignore[import-not-found]
            except ImportError as err:
                raise ImportError(
                    "Install matplotlib and numpy using pip: pip install matplotlib numpy"
                ) from err
            # predict for the determined points
            xHat = np.linspace(min(x), max(x), num=10000)
            yHat = pwlf_.predict(xHat)

            # plot the results
            plt.figure()
            plt.plot(x, y, "o", label="Shifted")
            plt.plot(xHat, yHat, "-", label="Predicted")
            plt.plot(x_, y_, "x", label="Original")
            plt.ylabel("Recomp time / Total recomp time")
            plt.xlabel("Memory discarded / Total memory")
            plt.legend()
            plt.title(f"{filename}")
            plt.suptitle(
                f"Total Memory = {sac_memory} B Total Runtime = {sac_runtime:.4f} ms",
                fontsize=10,
            )
            folder_name = "tradeoff_graphs"
            if not os.path.exists(folder_name):
                os.makedirs(folder_name)
            # Save the plots in the folder
            plt.savefig(os.path.join(folder_name, f"{filename}.png"))

        if save_tradeoff_graph:
            save_prediction_graph(tradeoff_pwlf, x, y, filename)
        # 9. Obtain the slopes, intercepts and breakpoints of the fitted piecewise linear functions
        slopes = tradeoff_pwlf.calc_slopes().tolist()
        assert isinstance(tradeoff_pwlf.intercepts, np.ndarray) and isinstance(
            tradeoff_pwlf.fit_breaks, np.ndarray
        )
        intercepts = tradeoff_pwlf.intercepts.tolist()
        fit_breaks = tradeoff_pwlf.fit_breaks.tolist()
        return SACTradeOffStats(
            n_segments=n_segments,
            slopes=slopes,
            intercepts=intercepts,  # type: ignore[arg-type]
            fit_breaks=fit_breaks,  # type: ignore[arg-type]
            tradeoff_curve=tradeoff_curve,
            sac_memory=sac_memory,
            sac_runtime=sac_runtime,
        )
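
    # For illustration (not in the original file): ``pwlf`` fits a continuous
    # piecewise-linear function to (x, y) data; a minimal standalone use with
    # made-up points looks like:
    #
    #     import pwlf
    #     f = pwlf.PiecewiseLinFit([0.0, 0.25, 0.5, 1.0], [0.0, 0.1, 0.5, 1.0])
    #     f.fit(n_segments=2)           # least-squares fit with 2 segments
    #     yhat = f.predict([0.3, 0.7])  # evaluate the fitted function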
|
||||
|
||||
def display_sac_stats(
|
||||
self, sac_stats: SACStats, print_tabular: bool = False
|
||||
) -> None:
|
||||
"""
|
||||
Displays the SAC statistics.
|
||||
|
||||
Args:
|
||||
sac_stats (SACStats): The SAC statistics to display.
|
||||
print_tabular (bool, optional): Whether to print the statistics in a tabular format. Defaults to False.
|
||||
|
||||
Prints:
|
||||
1. Total Memory: The total memory usage in bytes.
|
||||
2. Total Runtime: The total runtime in milliseconds.
|
||||
3. Store Random: A flag indicating whether to force store random operator results.
|
||||
|
||||
Followed by a table with the following columns:
|
||||
1. Op Idx: The operator index.
|
||||
2. Op Name: The operator name.
|
||||
3. Runtimes (ms): The operator runtime in milliseconds.
|
||||
4. Memory (B): The operator memory usage in bytes.
|
||||
5. View-like: A flag indicating whether the operator is view-like.
|
||||
6. Random: A flag indicating whether the operator is random.
|
||||
7. Saved Autograd: A flag indicating whether the operator's result is saved by autograd engine.
|
||||
8. In-place: The index of the operator's first parent, or None if not in-place.
|
||||
|
||||
If print_tabular is True, the table is printed in a tabular format.
|
||||
Otherwise, the table is printed in a plain text format.
|
||||
"""
|
||||
print(
|
||||
f"Total Memory: {sum(sac_stats.memory)} B Total Runtime: {sum(sac_stats.runtimes)} ms"
|
||||
f" Store Random: {sac_stats.force_store_random}"
|
||||
)
|
||||
table_data = []
|
||||
op_parent = dict(sac_stats.inplace_ops)
|
||||
for i, fn_name in enumerate(sac_stats.func_names):
|
||||
row = [
|
||||
str(i),
|
||||
fn_name,
|
||||
f"{sac_stats.runtimes[i]:.4f}",
|
||||
str(sac_stats.memory[i]),
|
||||
str(i in sac_stats.view_like_ops),
|
||||
str(i in sac_stats.rand_ops),
|
||||
str(i in sac_stats.saved_autograd_ops),
|
||||
str(op_parent.get(i, None)),
|
||||
]
|
||||
table_data.append(row)
|
||||
# Define headers
|
||||
headers = [
|
||||
"Op Idx",
|
||||
"Op Name",
|
||||
"Runtimes(ms)",
|
||||
"Memory (B)",
|
||||
"View-like",
|
||||
"Random",
|
||||
"Saved Autograd",
|
||||
"In-place",
|
||||
]
|
||||
if print_tabular:
|
||||
_display_stats_tabular(headers, table_data)
|
||||
else:
|
||||
max_widths = [0 for _ in range(len(headers))]
|
||||
table_data.insert(0, headers)
|
||||
for row in table_data:
|
||||
for i, elem in enumerate(row):
|
||||
max_widths[i] = max(max_widths[i], len(elem))
|
||||
for row in table_data:
|
||||
print(
|
||||
"\t".join(
|
||||
[f"{elem:<{max_widths[i]}}" for i, elem in enumerate(row)]
|
||||
)
|
||||
)
|
||||
|
||||
def display_sac_tradeoff_stats(
|
||||
self,
|
||||
greedy_order_meta: SACGreedyOrderMeta,
|
||||
sac_stats: SACStats,
|
||||
print_tabular: bool = False,
|
||||
) -> None:
|
||||
"""
|
||||
Displays the SAC trade-off statistics.
|
||||
|
||||
Args:
|
||||
greedy_order_meta (SACGreedyOrderMeta): The SAC greedy order metadata.
|
||||
sac_stats (SACStats): The SAC statistics.
|
||||
print_tabular (bool, optional): Whether to print the statistics in a tabular format. Defaults to False.
|
||||
|
||||
Prints:
|
||||
A table with the following columns:
|
||||
1. Op Id(s): The operator index(es).
|
||||
2. Op Name(s): The operator name(s).
|
||||
3. Discarded Mem (%): The percentage of discarded memory.
|
||||
4. Discarded Mem (B): The discarded memory in bytes.
|
||||
5. Recomp time (%): The percentage of recomputed time.
|
||||
6. Recomp time (ms): The recomputed time in milliseconds.
|
||||
7. MSPS: The memory per second.
|
||||
8. Always Stored: A flag indicating whether the operator is always stored.
|
||||
9. Always Recomputed: A flag indicating whether the operator is always recomputed.
|
||||
|
||||
If print_tabular is True, the table is printed in a tabular format.
|
||||
Otherwise, the table is printed in a plain text format.
|
||||
"""
|
||||
table_data = []
|
||||
total_memory, total_runtime = sum(sac_stats.memory), sum(sac_stats.runtimes)
|
||||
discarded_mem: int = 0
|
||||
recomp_runtime: float = 0.0
|
||||
|
||||
def append_row(
|
||||
op_indices: set[int],
|
||||
func_names: set[str],
|
||||
msps: Optional[float] = None,
|
||||
stored: Optional[bool] = False,
|
||||
recomputed: Optional[bool] = False,
|
||||
) -> None:
|
||||
row = [
|
||||
str(op_indices),
|
||||
str(func_names),
|
||||
f"{discarded_mem / total_memory:.4f}",
|
||||
str(discarded_mem),
|
||||
f"{recomp_runtime / total_runtime:.4f}",
|
||||
str(recomp_runtime),
|
||||
f"{msps:.2e}" if msps is not None else str(nan),
|
||||
str(stored),
|
||||
str(recomputed),
|
||||
]
|
||||
table_data.append(row)
|
||||
|
||||
stored_ops, recomputed_ops, inplace_op_groups, random_ops_group, msps_meta = (
|
||||
greedy_order_meta.stored_ops,
|
||||
greedy_order_meta.recomputed_ops,
|
||||
greedy_order_meta.inplace_op_groups,
|
||||
greedy_order_meta.random_ops_group,
|
||||
greedy_order_meta.msps_meta,
|
||||
)
|
||||
|
||||
for op_idx in recomputed_ops:
|
||||
op_indices: set[int] = {op_idx}
|
||||
if op_idx in inplace_op_groups:
|
||||
op_indices.update(inplace_op_groups[op_idx])
|
||||
if op_idx in random_ops_group:
|
||||
op_indices.update(random_ops_group[op_idx])
|
||||
discarded_mem += sum(sac_stats.memory[i] for i in op_indices)
|
||||
recomp_runtime += sum(sac_stats.runtimes[i] for i in op_indices)
|
||||
func_names = {sac_stats.func_names[i] for i in op_indices}
|
||||
append_row(op_indices, func_names, recomputed=True)
|
||||
|
||||
for cand in msps_meta:
|
||||
discarded_mem += cand.memory
|
||||
recomp_runtime += cand.runtime
|
||||
op_indices = {cand.op_idx}
|
||||
if cand.op_idx in inplace_op_groups:
|
||||
op_indices.update(inplace_op_groups[cand.op_idx])
|
||||
if cand.op_idx in random_ops_group:
|
||||
op_indices.update(random_ops_group[cand.op_idx])
|
||||
append_row(op_indices, cand.func_names, msps=cand.msps)
|
||||
|
||||
for op_idx in stored_ops:
|
||||
op_indices = {op_idx}
|
||||
if op_idx in inplace_op_groups:
|
||||
op_indices.update(inplace_op_groups[op_idx])
|
||||
if op_idx in random_ops_group:
|
||||
op_indices.update(random_ops_group[op_idx])
|
||||
discarded_mem += sum(sac_stats.memory[i] for i in op_indices)
|
||||
recomp_runtime += sum(sac_stats.runtimes[i] for i in op_indices)
|
||||
func_names = {sac_stats.func_names[i] for i in op_indices}
|
||||
append_row(op_indices, func_names, stored=True)
|
||||
|
||||
headers = [
|
||||
"Op Id(s)",
|
||||
"Op Name(s)",
|
||||
"Discarded Mem (%)",
|
||||
"Discarded Mem (B)",
|
||||
"Recomp time (%)",
|
||||
"Recomp time (ms)",
|
||||
"MSPS",
|
||||
"Always Stored",
|
||||
"Always Recomputed",
|
||||
]
|
||||
if print_tabular:
|
||||
_display_stats_tabular(headers, table_data)
|
||||
else:
|
||||
max_widths = [0 for _ in range(len(headers))]
|
||||
table_data.insert(0, headers)
|
||||
for row in table_data:
|
||||
for i, elem in enumerate(row):
|
||||
max_widths[i] = max(max_widths[i], len(elem))
|
||||
for row in table_data:
|
||||
print(
|
||||
"\t".join(
|
||||
[f"{elem:<{max_widths[i]}}" for i, elem in enumerate(row)]
|
||||
)
|
||||
)
|
||||
|
||||
def pwlf_sac_tradeoff_curve(
|
||||
self,
|
||||
n_segments: int = 2,
|
||||
save_tradeoff_graphs: bool = False,
|
||||
) -> None:
|
||||
"""
|
||||
Fits a piecewise linear function with the specified sumber of segments to the SAC trade-off curve of
|
||||
discarded memory vs recomputation time.
|
||||
|
||||
Args:
|
||||
n_segments (int, optional): The number of segments to be used for fitting the piecewise linear function to
|
||||
the trade-off curve. Defaults to 2.
|
||||
save_tradeoff_graphs (bool, optional): Whether to save the trade-off graphs to file. Defaults to False.
|
||||
|
||||
If save_tradeoff_graphs is True, the trade-off graphs are saved to file using the module FQN as the filename.
|
||||
"""
|
||||
for mod_fqn, sac_stats in self.sac_mod_stats.items():
|
||||
self.sac_mod_tradeoff_stats[mod_fqn] = self._get_sac_tradeoff_pwlf_stats(
|
||||
sac_stats=sac_stats,
|
||||
greedy_order_meta=self.sac_mod_greedy_order_meta[mod_fqn],
|
||||
n_segments=n_segments,
|
||||
save_tradeoff_graph=save_tradeoff_graphs,
|
||||
filename=mod_fqn,
|
||||
)
|
||||
|
||||
def display_modulewise_sac_stats(
|
||||
self, depth: int = 2, print_tabular: bool = False
|
||||
) -> None:
|
||||
"""
|
||||
Displays the SAC and trade-off statistics for each module.
|
||||
|
||||
Args:
|
||||
depth (int, optional): The maximum depth of modules to display. Defaults to 2.
|
||||
print_tabular (bool, optional): Whether to print the statistics in a tabular format. Defaults to False.
|
||||
|
||||
Prints:
|
||||
For each module with depth less than or equal to the specified depth:
|
||||
1. The SAC statistics for the module (using display_sac_stats).
|
||||
2. The SAC trade-off statistics for the module (using display_sac_tradeoff_stats).
|
||||
|
||||
If print_tabular is True, the statistics are printed in a tabular format.
|
||||
Otherwise, the statistics are printed in a plain text format.
|
||||
"""
|
||||
for mod_fqn, sac_stats in self.sac_mod_stats.items():
|
||||
mod_depth = mod_fqn.count(".") + 1
|
||||
if mod_depth > depth:
|
||||
continue
|
||||
print(f"Module: {mod_fqn}")
|
||||
self.display_sac_stats(sac_stats, print_tabular)
|
||||
print(f"AC Trade-off for Module: {mod_fqn} MSPS = Memory/Runtime")
|
||||
self.display_sac_tradeoff_stats(
|
||||
self.sac_mod_greedy_order_meta[mod_fqn], sac_stats, print_tabular
|
||||
)
|
||||
|
||||
def __call__(self, estimate_mode_type: str) -> Self:
|
||||
"""
|
||||
Sets the estimate mode type.
|
||||
|
||||
Currently supported modes:
|
||||
- "operator-level-benchmark": Estimates runtime using operator benchmarking.
|
||||
- "operator-level-cost-model": Estimates runtime using roofline cost model.
|
||||
|
||||
Args:
|
||||
estimate_mode_type (str): The type of estimate mode to use.
|
||||
|
||||
Returns:
|
||||
SACEstimator: The SAC estimator instance.
|
||||
|
||||
Raises:
|
||||
NotImplementedError: If the estimate mode type is not supported.
|
||||
"""
|
||||
if estimate_mode_type == "operator-level-benchmark":
|
||||
self._estimate_runtime = RuntimeEstimator._benchmark_estimate
|
||||
elif estimate_mode_type == "operator-level-cost-model":
|
||||
self._estimate_runtime = RuntimeEstimator._roofline_estimate
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
f"estimate_mode_type {estimate_mode_type} not supported"
|
||||
)
|
||||
return self
|
||||
|
||||
    def __enter__(self) -> Self:  # type: ignore[no-untyped-def]
        fake_mode = active_fake_mode()
        assert isinstance(fake_mode, FakeTensorMode), (
            "SAC Estimator should be called in FakeTensorMode"
        )
        RuntimeEstimator.fake_mode = fake_mode
        self._mod_tracker.register_user_hooks(
            pre_fw_hook=self._pre_fw_hook,
            post_fw_hook=self._post_fw_hook,
        )
        self._mod_tracker.__enter__()
        self._saved_tensor_hook_ctx.__enter__()
        return super().__enter__()

    def __exit__(self, *args: Any) -> None:  # type: ignore[no-untyped-def]
        self._saved_tensor_hook_ctx.__exit__()
        self._mod_tracker.__exit__(*args)
        super().__exit__(*args)
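
# Editor's sketch (not part of the file above; a minimal, assumed driver):
# construct the estimator, pick an estimate mode via __call__, run a forward
# pass under FakeTensorMode, then display the collected statistics.

import torch
from torch._subclasses.fake_tensor import FakeTensorMode
from torch.distributed._tools.sac_estimator import SACEstimator

with FakeTensorMode():
    # Model and input shapes here are arbitrary illustration values
    model = torch.nn.Sequential(torch.nn.Linear(512, 512), torch.nn.GELU())
    inp = torch.randn(8, 512)
    sac_est = SACEstimator()
    with sac_est("operator-level-cost-model"):
        out = model(inp)  # SAC stats are gathered during the forward pass
    sac_est.display_modulewise_sac_stats(depth=2, print_tabular=False)
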
295
venv/Lib/site-packages/torch/distributed/_tools/sac_ilp.py
Normal file
@@ -0,0 +1,295 @@
import logging
import math
from enum import IntEnum
from typing import Optional

from torch.distributed._tools.ilp_utils import Graph, is_submodule
from torch.distributed._tools.sac_estimator import SACStats


try:
    from pulp import (  # type: ignore[import-untyped,import-not-found]
        lpDot,
        LpInteger,
        LpMaximize,
        LpMinimize,
        LpProblem,
        LpStatus,
        lpSum,
        LpVariable,
        PULP_CBC_CMD,
        value,
    )
except ImportError as err:
    raise ImportError(
        "Please install the pulp package. See: https://github.com/coin-or/pulp."
    ) from err

# Create a logger object
logger = logging.getLogger(__name__)

# Set the logging level to INFO
logger.setLevel(logging.INFO)

def sac_milp(
    graph: Graph,
    memory_budget: float,
    world_size: int = 1,
    ac_units: Optional[list[str]] = None,
    fsdp_units: Optional[list[str]] = None,
) -> tuple[dict[str, float], float, int]:
    """
    MILP to decide which modules to AC and how much memory to discard.
    The objective is to minimize recomputation time.
    The constraint is to ensure peak memory is under budget.

    Args:
        graph: graph representation of the model as a module/submodule tree,
            where each node is a submodule with memory & runtime stats
        memory_budget: memory budget in GiB
        world_size: number of GPUs. In the case of FSDP, world_size will be
            used to compute the amount of parameter and gradient memory on each rank
        ac_units: a list of user-specified AC units
        fsdp_units: a list of FSDP units. AC units cannot be supermodules of FSDP units.

    Returns:
        Dict[str, float]: the optimal SAC solution, mapping from module fqn to
            the percentage of activation memory to **discard**
        float: the recomputation time of the optimal SAC solution
        int: upper bound on the peak memory of the optimal SAC solution.
            Note that a value of -1 means the ILP solver failed to find a solution.
    """
    num_nodes = len(graph.nodes)
    M = 10**2  # note: numerical issues may occur if M is too big
    MEM_MULTIPLIER = 2**30

    # Create a MILP problem
    prob = LpProblem("SAC", LpMinimize)

    # Create decision variables
    # y_i: indicator for whether module i is AC'ed
    y = LpVariable.matrix("y", list(range(num_nodes)), 0, 1, LpInteger)
    # r_i: percentage of discarded activation memory
    r = LpVariable.matrix("r", list(range(num_nodes)), 0, 1)
    # d_i: discarded activation memory for module i
    d = LpVariable.matrix("d", list(range(num_nodes)), 0)
    # a_i: total activation memory at module i
    a = LpVariable.matrix("a", list(range(num_nodes)), 0)
    # m_i: memory at module i, combining parameters, gradients, and activations
    m = LpVariable.matrix("m", list(range(num_nodes)), 0)
    # rcp_i: percentage of recomputation time
    rcp = LpVariable.matrix("rcp", list(range(num_nodes)), 0)
    # rct_i: recomputation time for module i (in ms)
    rct = LpVariable.matrix("rct", list(range(num_nodes)), 0)
    # max_m: peak memory
    max_m = LpVariable("max_m", 0)

    # Add constraints
    # [Constraint] User-specified AC units
    if ac_units:
        ac_units_set = set(ac_units)
        for i in range(num_nodes):
            if graph.nodes[i]["fqn"] not in ac_units_set:
                prob += y[i] == 0

    # [Constraint] AC units cannot be supermodules of user-specified FSDP units
    if fsdp_units:
        for i in range(num_nodes):
            if any(
                is_submodule(fsdp_unit, graph.nodes[i]["fqn"])
                for fsdp_unit in fsdp_units
            ):
                prob += y[i] == 0

    # [Constraint] No nested AC units
    for i in range(num_nodes):
        for j in range(i + 1, num_nodes):
            if graph.ad_matrix[i][j] == 1:
                prob += y[i] + y[j] <= 1

    # [Constraint] Do not AC leaf modules
    for i in range(num_nodes):
        if graph.nodes[i]["is_leaf"]:
            prob += y[i] == 0

    # [Constraint] Express amount of discarded activation memory
    for i in range(num_nodes):
        # There are two measures for activation memory: ACM and IA
        # 1. IA is the activation memory saved when not using AC
        # 2. ACM is the total activation memory, including those
        #    that are not typically saved when not using AC
        # Note: ACM >= IA
        if (not graph.nodes[i]["is_leaf"]) and graph.nodes[i][
            "sac_memory"
        ] < graph.nodes[i]["act_fw_per_module"]:
            logger.warning("For module %s:", graph.nodes[i]["fqn"])
            logger.warning(
                "activation memory from the memory tracker is %d,",
                graph.nodes[i]["act_fw_per_module"],
            )
            logger.warning(
                "activation memory from the SAC estimator is %d.",
                graph.nodes[i]["sac_memory"],
            )
            logger.warning("Something is wrong. Please check!")
            logger.warning("Overriding the latter with the former.")
            graph.nodes[i]["sac_memory"] = graph.nodes[i]["act_fw_per_module"]
        ACM_i = graph.nodes[i]["sac_memory"] / MEM_MULTIPLIER
        IA_i = graph.nodes[i]["act_fw_per_module"] / MEM_MULTIPLIER
        prob += d[i] == ACM_i * r[i] - (ACM_i - IA_i) * y[i]

    # [Constraint] Ensure correctness of r_i
    # There are two parts to its correctness:
    # 1. r_i > 0 only if y_i == 1 (discard only if it is an AC unit)
    # 2. r_i needs to be large enough to cover the difference between
    #    ACM and IA. Otherwise, we are not saving any memory
    for i in range(num_nodes):
        prob += y[i] >= r[i]
        if graph.nodes[i]["is_leaf"]:
            continue
        ACM_i = graph.nodes[i]["sac_memory"] / MEM_MULTIPLIER
        IA_i = graph.nodes[i]["act_fw_per_module"] / MEM_MULTIPLIER
        prob += r[i] >= (ACM_i - IA_i) / ACM_i * y[i]
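
        # Editor's worked example (illustrative numbers, not in the original
        # source): with ACM_i = 4 GiB and IA_i = 3 GiB, an AC'ed module must
        # satisfy r_i >= (4 - 3) / 4 = 0.25. Discarding less than 25% would not
        # even cover the extra activations AC materializes beyond what eager
        # execution saves (d_i would be <= 0), so memory would not decrease.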

    # [Constraint] Express total activation memory in the backward pass
    for i in range(num_nodes):
        AG_i = graph.nodes[i]["act_grad_per_module"] / MEM_MULTIPLIER
        TA_i = graph.nodes[i]["act_total"] / MEM_MULTIPLIER
        # Subtract the activation memory discarded by modules whose forward
        # pass completes before module i (selected via forward post-order)
        pos = graph.nodes[i]["pos_fw_post_order"]
        coeff = [0] * num_nodes
        for p in range(pos):
            j = graph.name2node[graph.fw_post_order[p]]["index"]
            coeff[j] = 1
        prob += a[i] == TA_i + AG_i - lpDot(coeff, d)

    # [Constraint] Express the total amount of memory at each module
    # Note that unsharded parameters and gradients are not included here
    P_1 = graph.nodes[0]["param_per_module"] / MEM_MULTIPLIER
    for i in range(num_nodes):
        TG_i = graph.nodes[i]["grad_total"] / MEM_MULTIPLIER
        prob += m[i] == a[i] + (P_1 + TG_i) / world_size

    # [Constraint] Express peak memory
    for i in range(num_nodes):
        prob += max_m >= m[i]

    # [Constraint] Express percentage of recomputation time
    for i in range(num_nodes):
        for s in range(graph.nodes[i]["n_segments"]):
            slope = graph.nodes[i]["slopes"][s]
            intercept = graph.nodes[i]["intercepts"][s]
            prob += rcp[i] >= slope * r[i] + intercept

    # [Constraint] Express recomputation time
    # rct_i = (rcp_i * ACT_i) if y_i == 1 else 0
    for i in range(num_nodes):
        ACT_i = graph.nodes[i]["sac_runtime"]
        prob += rct[i] <= M * y[i]
        prob += rct[i] <= ACT_i * rcp[i]
        prob += rct[i] >= ACT_i * rcp[i] - M * (1 - y[i])
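
    # Editor's note (a worked illustration, not in the original source): the
    # three inequalities above are the standard big-M linearization of the
    # product rct_i = y_i * (ACT_i * rcp_i).
    #   - If y_i == 0: rct[i] <= M * 0 pins rct_i to 0, while the lower bound
    #     ACT_i * rcp_i - M is slack (negative) as long as ACT_i * rcp_i <= M.
    #   - If y_i == 1: the last two constraints tighten to rct_i == ACT_i * rcp_i.
    # Correctness thus requires M to dominate every per-module recomputation
    # time (in ms), while an overly large M risks the numerical issues noted
    # where M is defined.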

    # [Constraint] Peak memory should be below budget
    prob += max_m <= memory_budget

    # Set objective
    prob += lpSum(rct)

    # Solve
    solver = PULP_CBC_CMD(gapRel=0.05, timeLimit=180, msg=0)
    status = prob.solve(solver)

    # If the solver fails, log the status and return an empty solution
    if status != 1:
        logger.error("Solver failed to find a solution: %s", LpStatus[status])
        return {}, 0, -1

    # Gather and return the solution if an optimal one is found
    ac_decisions = {}
    for i in range(num_nodes):
        if round(y[i].varValue) == 1:
            ac_decisions[graph.nodes[i]["fqn"]] = round(r[i].varValue, 4)
    recomputation_time = round(value(prob.objective), 2)
    peak_mem = round(max_m.varValue * MEM_MULTIPLIER)

    return ac_decisions, recomputation_time, peak_mem
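

# Editor's sketch (hypothetical values, not part of the original file) showing
# how sac_milp is typically driven once a Graph has been built from the
# memory/runtime tracker stats:
#
#     ac_decisions, recomp_ms, peak_bytes = sac_milp(
#         graph, memory_budget=40.0, world_size=8
#     )
#     # e.g. ac_decisions == {"model.layers.3": 0.6517, ...} maps each chosen
#     # AC unit to the fraction of its activation memory to discard;
#     # peak_bytes == -1 indicates the solver failed to find a solution.

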
class SACDecision(IntEnum):
    RECOMPUTE = 0
    SAVE = 1


def get_optimal_checkpointing_policy_per_module(
    sac_stats: SACStats, memory_budget: float
) -> list[int]:
    """
    This is adapted from:
    https://github.com/facebookresearch/xformers/blob/c6c0ac31f1b08542a0bc27278c6ed10f825f6963/xformers/checkpoint.py#L375

    Given the SACStats of a module, including the list of operators, their memory, runtimes, and metadata,
    decide via MILP an optimal set of operators to checkpoint under a given ``memory_budget``.

    Args:
        sac_stats: the SACStats object of the module
        memory_budget: a float between zero and one

    Returns:
        List[int]: the decision whether each operator should be saved (1) or recomputed (0).
    """
    if not (0 <= memory_budget <= 1):
        raise ValueError(
            f"`memory_budget` must be a float between 0 and 1. Got {memory_budget}."
        )
    num_ops = len(sac_stats.func_names)

    # Create a MILP problem
    prob = LpProblem("SAC-per-module", LpMaximize)

    # Create decision variables
    # x[i] = 1 means the i-th operator should be saved; otherwise it should be recomputed
    x = LpVariable.matrix("x", list(range(num_ops)), 0, 1, LpInteger)

    # Add constraints
    # [Constraint] random ops should be saved if ``force_store_random`` is True;
    # otherwise, random ops should either be all recomputed or all saved
    if sac_stats.force_store_random:
        for i in sac_stats.rand_ops:
            prob += x[i] == SACDecision.SAVE.value
    else:
        for i1, i2 in zip(sac_stats.rand_ops[:-1], sac_stats.rand_ops[1:]):
            prob += x[i1] == x[i2]

    # [Constraint] view-like ops should always be recomputed
    for i in sac_stats.view_like_ops:
        prob += x[i] == SACDecision.RECOMPUTE.value

    # [Constraint] each inplace op should share the decision of its parent op
    for op, op_parent in sac_stats.inplace_ops:
        if op != op_parent:
            prob += x[op] == x[op_parent]
        else:
            prob += x[op] == SACDecision.SAVE.value

    # [Constraint] saved memory should be under the ``memory_budget``
    max_memory = math.ceil(memory_budget * sum(sac_stats.memory))
    prob += lpDot(x, sac_stats.memory) <= max_memory

    # [Objective] Minimize recomputation time. Note the ILP is a maximization
    # problem because x[i] == 1 means the op is saved (not recomputed), and thus
    # the recomputation time is sum(sac_stats.runtimes) - lpDot(x, sac_stats.runtimes)
    prob += lpDot(x, sac_stats.runtimes)

    # Solve
    solver = PULP_CBC_CMD(gapRel=0.05, timeLimit=10, msg=0)
    status = prob.solve(solver)

    # If the solver fails, log the status and return an empty solution
    if status != 1:
        logger.error("Solver failed to find a solution: %s", LpStatus[status])
        return []

    # Gather and return the solution if an optimal one is found
    return [round(x[i].varValue) for i in range(num_ops)]
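

# Editor's sketch (assumed usage, not part of the original file): `sac_stats`
# would typically come from SACEstimator.sac_mod_stats for a given module FQN.
#
#     policy = get_optimal_checkpointing_policy_per_module(sac_stats, memory_budget=0.5)
#     # policy[i] == SACDecision.SAVE.value (1): op i's output is stored;
#     # policy[i] == SACDecision.RECOMPUTE.value (0): op i is recomputed in
#     # backward. Stored memory stays within 50% of the module's activation
#     # memory, while the saved runtime is maximized.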