fastvideo.platforms.cuda#

Code inside this file can safely assume the CUDA platform, e.g. it may import pynvml. However, it should not initialize a CUDA context.
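For context, the distinction matters in practice: NVML can be queried without creating a CUDA context, whereas touching torch.cuda device state may create one. A minimal sketch, assuming pynvml (nvidia-ml-py) is installed; this is illustrative and not taken from the module itself:

```python
import pynvml

# Illustrative only: NVML queries like these do not create a CUDA context,
# which is why importing pynvml here is considered safe.
pynvml.nvmlInit()
try:
    handle = pynvml.nvmlDeviceGetHandleByIndex(0)
    name = pynvml.nvmlDeviceGetName(handle)                  # device name, no CUDA context
    total_bytes = pynvml.nvmlDeviceGetMemoryInfo(handle).total
    print(name, total_bytes)
finally:
    pynvml.nvmlShutdown()
```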

Module Contents#

Classes#

Functions#

Data#

API#

fastvideo.platforms.cuda.CudaPlatform[source]#

None
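The rendered value above is not informative. A plausible sketch of how such an alias is commonly resolved, assuming the module picks the NVML-backed class when NVML is usable; this resolution is an assumption, not taken from the source:

```python
# Hypothetical resolution of the CudaPlatform alias (assumption):
CudaPlatform = NvmlCudaPlatform if nvml_available else NonNvmlCudaPlatform
```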

class fastvideo.platforms.cuda.CudaPlatformBase[source]#

Bases: fastvideo.platforms.interface.Platform

device_control_env_var: str[source]#

‘CUDA_VISIBLE_DEVICES’

device_name: str[source]#

‘cuda’

device_type: str[source]#

‘cuda’

dispatch_key: str[source]#

‘CUDA’

classmethod get_attn_backend_cls(selected_backend: fastvideo.platforms.interface.AttentionBackendEnum | None, head_size: int, dtype: torch.dtype) → str[source]#
classmethod get_current_memory_usage(device: torch.types.Device | None = None) → float[source]#
abstract classmethod get_device_capability(device_id: int = 0) → fastvideo.platforms.interface.DeviceCapability | None[source]#
classmethod get_device_communicator_cls() → str[source]#
abstract classmethod get_device_name(device_id: int = 0) → str[source]#
abstract classmethod get_device_total_memory(device_id: int = 0) → int[source]#
classmethod is_async_output_supported(enforce_eager: bool | None) → bool[source]#
classmethod log_warnings() → None[source]#
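A hedged usage sketch of this interface, assuming the classmethods behave as their names and signatures suggest and that the CudaPlatform alias points at a concrete subclass (example output values are illustrative):

```python
from fastvideo.platforms.cuda import CudaPlatform

# All of these are classmethods, so no instance is required.
print(CudaPlatform.device_name)                 # 'cuda'
print(CudaPlatform.dispatch_key)                # 'CUDA'
print(CudaPlatform.get_device_name(0))          # e.g. 'NVIDIA A100-SXM4-80GB' (illustrative)
print(CudaPlatform.get_device_total_memory(0))  # total device memory, in bytes
cap = CudaPlatform.get_device_capability(0)     # DeviceCapability or None
print(cap)
```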
class fastvideo.platforms.cuda.NonNvmlCudaPlatform[source]#

Bases: fastvideo.platforms.cuda.CudaPlatformBase

classmethod get_device_capability(device_id: int = 0) → fastvideo.platforms.interface.DeviceCapability[source]#
classmethod get_device_name(device_id: int = 0) → str[source]#
classmethod get_device_total_memory(device_id: int = 0) → int[source]#
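A plausible sketch of how a non-NVML fallback can obtain the same information through torch.cuda; this is an assumption about the approach, not the verified method bodies, and note that these torch calls may themselves initialize CUDA:

```python
import torch

def fallback_device_capability(device_id: int = 0) -> tuple[int, int]:
    # torch.cuda.get_device_capability returns a (major, minor) tuple.
    return torch.cuda.get_device_capability(device_id)

def fallback_device_name(device_id: int = 0) -> str:
    return torch.cuda.get_device_name(device_id)

def fallback_device_total_memory(device_id: int = 0) -> int:
    # total_memory is reported in bytes.
    return torch.cuda.get_device_properties(device_id).total_memory
```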
class fastvideo.platforms.cuda.NvmlCudaPlatform[source]#

Bases: fastvideo.platforms.cuda.CudaPlatformBase

classmethod get_device_capability(device_id: int = 0) → fastvideo.platforms.interface.DeviceCapability | None[source]#
classmethod get_device_name(device_id: int = 0) → str[source]#
classmethod get_device_total_memory(device_id: int = 0) → int[source]#
classmethod get_device_uuid(device_id: int = 0) → str[source]#
classmethod has_device_capability(capability: tuple[int, int] | int, device_id: int = 0) → bool[source]#

Query whether the set of GPUs is fully connected by NVLink (1 hop).

classmethod log_warnings() → None[source]#
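A hedged sketch of how the NVML-backed queries could be implemented with pynvml; the helper names below are hypothetical and the real method bodies may differ:

```python
import pynvml

def nvml_device_name(physical_device_id: int = 0) -> str:
    # Assumes NVML is already initialized (see with_nvml_context below).
    handle = pynvml.nvmlDeviceGetHandleByIndex(physical_device_id)
    return pynvml.nvmlDeviceGetName(handle)

def nvml_device_total_memory(physical_device_id: int = 0) -> int:
    handle = pynvml.nvmlDeviceGetHandleByIndex(physical_device_id)
    return int(pynvml.nvmlDeviceGetMemoryInfo(handle).total)

def nvml_device_capability(physical_device_id: int = 0) -> tuple[int, int]:
    handle = pynvml.nvmlDeviceGetHandleByIndex(physical_device_id)
    major, minor = pynvml.nvmlDeviceGetCudaComputeCapability(handle)
    return major, minor

def nvml_device_uuid(physical_device_id: int = 0) -> str:
    handle = pynvml.nvmlDeviceGetHandleByIndex(physical_device_id)
    return pynvml.nvmlDeviceGetUUID(handle)
```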
fastvideo.platforms.cuda.device_id_to_physical_device_id(device_id: int) → int[source]#
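A plausible sketch of what such a mapping does, assuming it translates a logical device index through CUDA_VISIBLE_DEVICES and that the variable lists integer indices; the helper name below is hypothetical:

```python
import os

def map_logical_to_physical(device_id: int) -> int:
    # With CUDA_VISIBLE_DEVICES="2,3", logical device 0 is physical GPU 2.
    visible = os.environ.get("CUDA_VISIBLE_DEVICES", "")
    if visible:
        physical_ids = [int(x) for x in visible.split(",") if x.strip()]
        return physical_ids[device_id]
    return device_id
```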
fastvideo.platforms.cuda.logger[source]#

‘init_logger(…)’

fastvideo.platforms.cuda.nvml_available[source]#

False

fastvideo.platforms.cuda.pynvml[source]#

‘import_pynvml(…)’
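A minimal sketch of what a lazy import helper like import_pynvml could do (hypothetical; the real helper is defined elsewhere in fastvideo):

```python
def import_pynvml_sketch():
    # Import lazily so this module can still be imported where pynvml
    # (nvidia-ml-py) is not installed; callers check the result for None.
    try:
        import pynvml
        return pynvml
    except ImportError:
        return None
```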

fastvideo.platforms.cuda.with_nvml_context(fn: collections.abc.Callable[fastvideo.platforms.cuda._P, fastvideo.platforms.cuda._R]) → collections.abc.Callable[fastvideo.platforms.cuda._P, fastvideo.platforms.cuda._R][source]#
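A hedged sketch of what a decorator with this signature typically does: initialize NVML before the wrapped call and shut it down afterwards. This is an assumption about the behavior, not the verified body:

```python
import functools
from collections.abc import Callable
from typing import ParamSpec, TypeVar

import pynvml

_P = ParamSpec("_P")
_R = TypeVar("_R")

def with_nvml_context_sketch(fn: Callable[_P, _R]) -> Callable[_P, _R]:
    @functools.wraps(fn)
    def wrapper(*args: _P.args, **kwargs: _P.kwargs) -> _R:
        pynvml.nvmlInit()            # open an NVML session for this call
        try:
            return fn(*args, **kwargs)
        finally:
            pynvml.nvmlShutdown()    # always release NVML, even on error
    return wrapper
```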