fastvideo.v1.attention.backends.flash_attn#

Module Contents#

Classes#

Data#

API#

class fastvideo.v1.attention.backends.flash_attn.FlashAttentionBackend[source]#

Bases: fastvideo.v1.attention.backends.abstract.AttentionBackend

accept_output_buffer: bool[source]#

True

abstract static get_builder_cls() Type[fastvideo.v1.attention.backends.abstract.AttentionMetadataBuilder][source]#
static get_impl_cls() Type[fastvideo.v1.attention.backends.flash_attn.FlashAttentionImpl][source]#
abstract static get_metadata_cls() Type[fastvideo.v1.attention.backends.abstract.AttentionMetadata][source]#
static get_name() str[source]#
static get_supported_head_sizes() List[int][source]#
class fastvideo.v1.attention.backends.flash_attn.FlashAttentionImpl(num_heads: int, head_size: int, causal: bool, softmax_scale: float, num_kv_heads: Optional[int] = None, prefix: str = '', **extra_impl_args)[source]#

Bases: fastvideo.v1.attention.backends.abstract.AttentionImpl

forward(query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, attn_metadata: fastvideo.v1.attention.backends.abstract.AttentionMetadata)[source]#
fastvideo.v1.attention.backends.flash_attn.logger[source]#

'init_logger(...)'