fastvideo.v1.platforms.interface#

Module Contents#

Classes#

Data#

API#

class fastvideo.v1.platforms.interface.DeviceCapability[source]#

Bases: typing.NamedTuple

as_version_str() str[source]#
major: int[source]#

None

minor: int[source]#

None

to_int() int[source]#

Express device capability as an integer <major><minor>.

It is assumed that the minor version is always a single digit.

class fastvideo.v1.platforms.interface.Platform[source]#
device_name: str[source]#

None

device_type: str[source]#

None

dispatch_key: str[source]#

‘CPU’

classmethod get_attn_backend_cls(selected_backend: Optional[fastvideo.v1.platforms.interface._Backend], head_size: int, dtype: torch.dtype) str[source]#

Get the attention backend class of a device.

abstract classmethod get_current_memory_usage(device: Optional[torch.types.Device] = None) float[source]#

Return the memory usage in bytes.

classmethod get_device_capability(device_id: int = 0) Optional[fastvideo.v1.platforms.interface.DeviceCapability][source]#

Stateless version of :func:`torch.cuda.get_device_capability`.

classmethod get_device_communicator_cls() str[source]#

Get device specific communicator class for distributed communication.

abstract classmethod get_device_name(device_id: int = 0) str[source]#

Get the name of a device.

abstract classmethod get_device_total_memory(device_id: int = 0) int[source]#

Get the total memory of a device in bytes.

abstract classmethod get_device_uuid(device_id: int = 0) str[source]#

Get the UUID of a device, e.g. the PCI bus ID.

classmethod has_device_capability(capability: Union[Tuple[int, int], int], device_id: int = 0) bool[source]#

Test whether this platform is compatible with a device capability.

The capability argument can either be:

  • A tuple (major, minor).

  • An integer <major><minor>. (See :meth:`DeviceCapability.to_int`)

classmethod inference_mode()[source]#

A device-specific wrapper of torch.inference_mode.

This wrapper is recommended because some hardware backends such as TPU do not support torch.inference_mode. In such a case, they will fall back to torch.no_grad by overriding this method.

abstract classmethod is_async_output_supported(enforce_eager: Optional[bool]) bool[source]#

Check if the current platform supports async output.

is_cpu() bool[source]#
is_cuda() bool[source]#
is_cuda_alike() bool[source]#

Stateless version of :func:`torch.cuda.is_available`.

is_out_of_tree() bool[source]#
is_rocm() bool[source]#
is_tpu() bool[source]#
classmethod seed_everything(seed: Optional[int] = None) None[source]#

Set the seed of each random module. torch.manual_seed will set seed on all devices.

Loosely based on: https://github.com/Lightning-AI/pytorch-lightning/blob/2.4.0/src/lightning/fabric/utilities/seed.py#L20

simple_compile_backend: str[source]#

‘inductor’

supported_quantization: list[str][source]#

[]

classmethod verify_model_arch(model_arch: str) None[source]#

Verify whether the current platform supports the specified model architecture.

  • This will raise an Error or Warning based on the model support on the current platform.

  • By default all models are considered supported.

classmethod verify_quantization(quant: str) None[source]#

Verify whether the quantization is supported by the current platform.

class fastvideo.v1.platforms.interface.PlatformEnum[source]#

Bases: enum.Enum

CPU[source]#

‘auto(…)’

CUDA[source]#

‘auto(…)’

OOT[source]#

‘auto(…)’

ROCM[source]#

‘auto(…)’

TPU[source]#

‘auto(…)’

UNSPECIFIED[source]#

‘auto(…)’

class fastvideo.v1.platforms.interface.UnspecifiedPlatform[source]#

Bases: fastvideo.v1.platforms.interface.Platform

device_type = <Multiline-String>[source]#
fastvideo.v1.platforms.interface.logger[source]#

‘init_logger(…)’