
interface

Classes

fastvideo.platforms.interface.DeviceCapability

Bases: NamedTuple

Functions

fastvideo.platforms.interface.DeviceCapability.to_int
to_int() -> int

Express device capability as an integer <major><minor>.

It is assumed that the minor version is always a single digit.

Source code in fastvideo/platforms/interface.py
def to_int(self) -> int:
    """
    Express device capability as an integer ``<major><minor>``.

    It is assumed that the minor version is always a single digit.
    """
    assert 0 <= self.minor < 10
    return self.major * 10 + self.minor
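
Example usage, as a minimal sketch that only assumes the major and minor fields referenced in the source above:

from fastvideo.platforms.interface import DeviceCapability

cap = DeviceCapability(major=8, minor=6)  # e.g. compute capability 8.6
assert cap.to_int() == 86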

fastvideo.platforms.interface.Platform

Functions

fastvideo.platforms.interface.Platform.get_attn_backend_cls classmethod
get_attn_backend_cls(selected_backend: AttentionBackendEnum | None, head_size: int, dtype: dtype) -> str

Get the attention backend class of a device.

Source code in fastvideo/platforms/interface.py
@classmethod
def get_attn_backend_cls(cls, selected_backend: AttentionBackendEnum | None,
                         head_size: int, dtype: torch.dtype) -> str:
    """Get the attention backend class of a device."""
    return ""
fastvideo.platforms.interface.Platform.get_cpu_architecture classmethod
get_cpu_architecture() -> CpuArchEnum

Get the CPU architecture of the current platform.

Source code in fastvideo/platforms/interface.py
@classmethod
def get_cpu_architecture(cls) -> CpuArchEnum:
    """Get the CPU architecture of the current platform."""
    return CpuArchEnum.UNSPECIFIED
fastvideo.platforms.interface.Platform.get_current_memory_usage classmethod
get_current_memory_usage(device: Device | None = None) -> float

Return the memory usage in bytes.

Source code in fastvideo/platforms/interface.py
@classmethod
def get_current_memory_usage(cls,
                             device: torch.types.Device | None = None
                             ) -> float:
    """
    Return the memory usage in bytes.
    """
    raise NotImplementedError
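
The base class leaves this unimplemented; a CUDA-flavoured override might look roughly like the sketch below (not the library's actual implementation):

import torch

from fastvideo.platforms.interface import Platform

class MyCudaPlatform(Platform):
    @classmethod
    def get_current_memory_usage(cls,
                                 device: torch.types.Device | None = None
                                 ) -> float:
        # One reasonable definition: peak bytes allocated on the device.
        torch.cuda.reset_peak_memory_stats(device)
        return float(torch.cuda.max_memory_allocated(device))
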
fastvideo.platforms.interface.Platform.get_device_capability classmethod
get_device_capability(device_id: int = 0) -> DeviceCapability | None

Stateless version of torch.cuda.get_device_capability.

Source code in fastvideo/platforms/interface.py
@classmethod
def get_device_capability(
    cls,
    device_id: int = 0,
) -> DeviceCapability | None:
    """Stateless version of :func:`torch.cuda.get_device_capability`."""
    return None
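
A CUDA-style platform could implement it by wrapping the torch query and returning the NamedTuple, roughly as in this illustrative sketch:

import torch

from fastvideo.platforms.interface import DeviceCapability, Platform

class MyCudaPlatform(Platform):
    @classmethod
    def get_device_capability(cls, device_id: int = 0) -> DeviceCapability | None:
        # Same information as torch.cuda.get_device_capability, wrapped in
        # the DeviceCapability NamedTuple.
        major, minor = torch.cuda.get_device_capability(device_id)
        return DeviceCapability(major=major, minor=minor)
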
fastvideo.platforms.interface.Platform.get_device_communicator_cls classmethod
get_device_communicator_cls() -> str

Get device specific communicator class for distributed communication.

Source code in fastvideo/platforms/interface.py
@classmethod
def get_device_communicator_cls(cls) -> str:
    """
    Get device specific communicator class for distributed communication.
    """
    return "fastvideo.distributed.device_communicators.base_device_communicator.DeviceCommunicatorBase"  # noqa
fastvideo.platforms.interface.Platform.get_device_name classmethod
get_device_name(device_id: int = 0) -> str

Get the name of a device.

Source code in fastvideo/platforms/interface.py
@classmethod
def get_device_name(cls, device_id: int = 0) -> str:
    """Get the name of a device."""
    raise NotImplementedError
fastvideo.platforms.interface.Platform.get_device_total_memory classmethod
get_device_total_memory(device_id: int = 0) -> int

Get the total memory of a device in bytes.

Source code in fastvideo/platforms/interface.py
@classmethod
def get_device_total_memory(cls, device_id: int = 0) -> int:
    """Get the total memory of a device in bytes."""
    raise NotImplementedError
fastvideo.platforms.interface.Platform.get_device_uuid classmethod
get_device_uuid(device_id: int = 0) -> str

Get the uuid of a device, e.g. the PCI bus ID.

Source code in fastvideo/platforms/interface.py
@classmethod
def get_device_uuid(cls, device_id: int = 0) -> str:
    """Get the uuid of a device, e.g. the PCI bus ID."""
    raise NotImplementedError
fastvideo.platforms.interface.Platform.get_torch_device classmethod
get_torch_device()

Check if the current platform supports torch device.

Source code in fastvideo/platforms/interface.py
@classmethod
def get_torch_device(cls):
    """
    Check if the current platform supports torch device.
    """
    raise NotImplementedError
fastvideo.platforms.interface.Platform.has_device_capability classmethod
has_device_capability(capability: tuple[int, int] | int, device_id: int = 0) -> bool

Test whether this platform is compatible with a device capability.

The capability argument can either be:

  • A tuple (major, minor).
  • An integer <major><minor>. (See DeviceCapability.to_int.)
Source code in fastvideo/platforms/interface.py
@classmethod
def has_device_capability(
    cls,
    capability: tuple[int, int] | int,
    device_id: int = 0,
) -> bool:
    """
    Test whether this platform is compatible with a device capability.

    The ``capability`` argument can either be:

    - A tuple ``(major, minor)``.
    - An integer ``<major><minor>``. (See :meth:`DeviceCapability.to_int`)
    """
    current_capability = cls.get_device_capability(device_id=device_id)
    if current_capability is None:
        return False

    if isinstance(capability, tuple):
        return current_capability >= capability

    return current_capability.to_int() >= capability
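
Both accepted forms in action, using an illustrative stub platform that pretends its device reports capability 8.0 (FakePlatform is not part of the library):

from fastvideo.platforms.interface import DeviceCapability, Platform

class FakePlatform(Platform):
    @classmethod
    def get_device_capability(cls, device_id: int = 0) -> DeviceCapability | None:
        return DeviceCapability(major=8, minor=0)  # pretend device

assert FakePlatform.has_device_capability((8, 0))     # tuple form
assert FakePlatform.has_device_capability(80)         # <major><minor> integer form
assert not FakePlatform.has_device_capability((9, 0))
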
fastvideo.platforms.interface.Platform.inference_mode classmethod
inference_mode()

A device-specific wrapper of torch.inference_mode.

This wrapper is recommended because some hardware backends such as TPU do not support torch.inference_mode. In such a case, they will fall back to torch.no_grad by overriding this method.

Source code in fastvideo/platforms/interface.py
@classmethod
def inference_mode(cls):
    """A device-specific wrapper of `torch.inference_mode`.

    This wrapper is recommended because some hardware backends such as TPU
    do not support `torch.inference_mode`. In such a case, they will fall
    back to `torch.no_grad` by overriding this method.
    """
    return torch.inference_mode(mode=True)
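
A backend that cannot use torch.inference_mode would override the method along these lines (illustrative only):

import torch

from fastvideo.platforms.interface import Platform

class MyTpuPlatform(Platform):
    @classmethod
    def inference_mode(cls):
        # Fall back to no_grad on hardware without inference_mode support.
        return torch.no_grad()
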
fastvideo.platforms.interface.Platform.is_async_output_supported classmethod
is_async_output_supported(enforce_eager: bool | None) -> bool

Check if the current platform supports async output.

Source code in fastvideo/platforms/interface.py
@classmethod
def is_async_output_supported(cls, enforce_eager: bool | None) -> bool:
    """
    Check if the current platform supports async output.
    """
    raise NotImplementedError
fastvideo.platforms.interface.Platform.is_cuda_alike
is_cuda_alike() -> bool

Stateless version of torch.cuda.is_available.

Source code in fastvideo/platforms/interface.py
def is_cuda_alike(self) -> bool:
    """Stateless version of :func:`torch.cuda.is_available`."""
    return self._enum in (PlatformEnum.CUDA, PlatformEnum.ROCM)
fastvideo.platforms.interface.Platform.seed_everything classmethod
seed_everything(seed: int | None = None) -> None

Set the seed of each random module. torch.manual_seed will set the seed on all devices.

Loosely based on: https://github.com/Lightning-AI/pytorch-lightning/blob/2.4.0/src/lightning/fabric/utilities/seed.py#L20

Source code in fastvideo/platforms/interface.py
@classmethod
def seed_everything(cls, seed: int | None = None) -> None:
    """
    Set the seed of each random module.
    `torch.manual_seed` will set seed on all devices.

    Loosely based on: https://github.com/Lightning-AI/pytorch-lightning/blob/2.4.0/src/lightning/fabric/utilities/seed.py#L20
    """
    if seed is not None:
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
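
Since the method only touches module-level RNG state, it can be called on any Platform subclass (or, for illustration, on the base class itself):

from fastvideo.platforms.interface import Platform

Platform.seed_everything(1234)  # seeds random, numpy and torch (all CUDA devices)
Platform.seed_everything(None)  # no-op: leaves the existing RNG state untouched
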
fastvideo.platforms.interface.Platform.verify_model_arch classmethod
verify_model_arch(model_arch: str) -> None

Verify whether the current platform supports the specified model architecture.

  • This will raise an error or warning depending on whether the model is supported on the current platform.
  • By default all models are considered supported.
Source code in fastvideo/platforms/interface.py
@classmethod
def verify_model_arch(cls, model_arch: str) -> None:
    """
    Verify whether the current platform supports the specified model
    architecture.

    - This will raise an Error or Warning based on the model support on
    the current platform.
    - By default all models are considered supported.
    """
    pass
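
A platform that supports only certain architectures would override this hook; a sketch with hypothetical architecture names:

from fastvideo.platforms.interface import Platform

class MyPlatform(Platform):
    # Illustrative allow-list; not real architecture names.
    _SUPPORTED_ARCHS = {"HypotheticalDiT", "HypotheticalUNet"}

    @classmethod
    def verify_model_arch(cls, model_arch: str) -> None:
        if model_arch not in cls._SUPPORTED_ARCHS:
            raise ValueError(
                f"Model architecture {model_arch} is not supported on this platform.")
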
fastvideo.platforms.interface.Platform.verify_quantization classmethod
verify_quantization(quant: str) -> None

Verify whether the quantization is supported by the current platform.

Source code in fastvideo/platforms/interface.py
@classmethod
def verify_quantization(cls, quant: str) -> None:
    """
    Verify whether the quantization is supported by the current platform.
    """
    if cls.supported_quantization and \
        quant not in cls.supported_quantization:
        raise ValueError(
            f"{quant} quantization is currently not supported in "
            f"{cls.device_name}.")

Functions