Skip to content

models

Classes

fastvideo.configs.models.DiTConfig dataclass

DiTConfig(arch_config: DiTArchConfig = DiTArchConfig(), prefix: str = '', quant_config: QuantizationConfig | None = None)

Bases: ModelConfig

Functions

fastvideo.configs.models.DiTConfig.add_cli_args staticmethod
add_cli_args(parser: Any, prefix: str = 'dit-config') -> Any

Add CLI arguments for DiTConfig fields

Source code in fastvideo/configs/models/dits/base.py
@staticmethod
def add_cli_args(parser: Any, prefix: str = "dit-config") -> Any:
    """Register DiTConfig's configurable fields as CLI arguments.

    Args:
        parser: An argparse-style parser exposing ``add_argument``.
        prefix: Dashed prefix used to namespace the generated options.

    Returns:
        The same parser, to allow chaining.
    """
    # argparse dest names use underscores; compute the translation once.
    dest_prefix = prefix.replace('-', '_')

    parser.add_argument(
        f"--{prefix}.prefix",
        type=str,
        dest=f"{dest_prefix}.prefix",
        default=DiTConfig.prefix,
        help="Prefix for the DiT model",
    )
    parser.add_argument(
        f"--{prefix}.quant-config",
        type=str,
        dest=f"{dest_prefix}.quant_config",
        default=None,
        help="Quantization configuration for the DiT model",
    )
    return parser

fastvideo.configs.models.VAEConfig dataclass

VAEConfig(arch_config: VAEArchConfig = VAEArchConfig(), load_encoder: bool = True, load_decoder: bool = True, tile_sample_min_height: int = 256, tile_sample_min_width: int = 256, tile_sample_min_num_frames: int = 16, tile_sample_stride_height: int = 192, tile_sample_stride_width: int = 192, tile_sample_stride_num_frames: int = 12, blend_num_frames: int = 0, use_tiling: bool = True, use_temporal_tiling: bool = True, use_parallel_tiling: bool = True, use_temporal_scaling_frames: bool = True)

Bases: ModelConfig

Functions

fastvideo.configs.models.VAEConfig.add_cli_args staticmethod
add_cli_args(parser: Any, prefix: str = 'vae-config') -> Any

Add CLI arguments for VAEConfig fields

Source code in fastvideo/configs/models/vaes/base.py
@staticmethod
def add_cli_args(parser: Any, prefix: str = "vae-config") -> Any:
    """Add CLI arguments for VAEConfig fields.

    Args:
        parser: An argparse-style parser exposing ``add_argument``.
        prefix: Dashed prefix used to namespace the generated options.

    Returns:
        The same parser, to allow chaining.
    """
    # argparse dest names use underscores; compute the translation once.
    dest_prefix = prefix.replace('-', '_')

    # (option-name, kind, default, help) for every VAEConfig field,
    # in the original registration order so --help output is unchanged.
    arg_specs = [
        ("load-encoder", "bool", VAEConfig.load_encoder,
         "Whether to load the VAE encoder"),
        ("load-decoder", "bool", VAEConfig.load_decoder,
         "Whether to load the VAE decoder"),
        ("tile-sample-min-height", "int", VAEConfig.tile_sample_min_height,
         "Minimum height for VAE tile sampling"),
        ("tile-sample-min-width", "int", VAEConfig.tile_sample_min_width,
         "Minimum width for VAE tile sampling"),
        ("tile-sample-min-num-frames", "int",
         VAEConfig.tile_sample_min_num_frames,
         "Minimum number of frames for VAE tile sampling"),
        ("tile-sample-stride-height", "int",
         VAEConfig.tile_sample_stride_height,
         "Stride height for VAE tile sampling"),
        ("tile-sample-stride-width", "int",
         VAEConfig.tile_sample_stride_width,
         "Stride width for VAE tile sampling"),
        ("tile-sample-stride-num-frames", "int",
         VAEConfig.tile_sample_stride_num_frames,
         "Stride number of frames for VAE tile sampling"),
        ("blend-num-frames", "int", VAEConfig.blend_num_frames,
         "Number of frames to blend for VAE tile sampling"),
        ("use-tiling", "bool", VAEConfig.use_tiling,
         "Whether to use tiling for VAE"),
        ("use-temporal-tiling", "bool", VAEConfig.use_temporal_tiling,
         "Whether to use temporal tiling for VAE"),
        ("use-parallel-tiling", "bool", VAEConfig.use_parallel_tiling,
         "Whether to use parallel tiling for VAE"),
        # Fix: use_temporal_scaling_frames is a VAEConfig field but
        # previously had no corresponding CLI argument.
        ("use-temporal-scaling-frames", "bool",
         VAEConfig.use_temporal_scaling_frames,
         "Whether to use temporal scaling frames for VAE"),
    ]

    for name, kind, default, help_text in arg_specs:
        # Booleans go through StoreBoolean; everything else is an int.
        kwargs = {"action": StoreBoolean} if kind == "bool" else {"type": int}
        parser.add_argument(
            f"--{prefix}.{name}",
            dest=f"{dest_prefix}.{name.replace('-', '_')}",
            default=default,
            help=help_text,
            **kwargs,
        )

    return parser

Modules

fastvideo.configs.models.dits

Classes

fastvideo.configs.models.dits.Cosmos25VideoConfig dataclass
Cosmos25VideoConfig(arch_config: DiTArchConfig = Cosmos25ArchConfig(), prefix: str = 'Cosmos25', quant_config: QuantizationConfig | None = None)

Bases: DiTConfig

Configuration for Cosmos 2.5 video generation model.

Modules

fastvideo.configs.models.dits.base
Classes
fastvideo.configs.models.dits.base.DiTConfig dataclass
DiTConfig(arch_config: DiTArchConfig = DiTArchConfig(), prefix: str = '', quant_config: QuantizationConfig | None = None)

Bases: ModelConfig

Functions
fastvideo.configs.models.dits.base.DiTConfig.add_cli_args staticmethod
add_cli_args(parser: Any, prefix: str = 'dit-config') -> Any

Add CLI arguments for DiTConfig fields

Source code in fastvideo/configs/models/dits/base.py
@staticmethod
def add_cli_args(parser: Any, prefix: str = "dit-config") -> Any:
    """Register DiTConfig's configurable fields as CLI arguments.

    Args:
        parser: An argparse-style parser exposing ``add_argument``.
        prefix: Dashed prefix used to namespace the generated options.

    Returns:
        The same parser, to allow chaining.
    """
    # argparse dest names use underscores; compute the translation once.
    dest_prefix = prefix.replace('-', '_')

    parser.add_argument(
        f"--{prefix}.prefix",
        type=str,
        dest=f"{dest_prefix}.prefix",
        default=DiTConfig.prefix,
        help="Prefix for the DiT model",
    )
    parser.add_argument(
        f"--{prefix}.quant-config",
        type=str,
        dest=f"{dest_prefix}.quant_config",
        default=None,
        help="Quantization configuration for the DiT model",
    )
    return parser
fastvideo.configs.models.dits.cosmos2_5
Classes
fastvideo.configs.models.dits.cosmos2_5.Cosmos25ArchConfig dataclass
Cosmos25ArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), _fsdp_shard_conditions: list = (lambda: [is_transformer_blocks])(), _compile_conditions: list = list(), param_names_mapping: dict = (lambda: {'^net\\.x_embedder\\.proj\\.1\\.(.*)$': 'patch_embed.proj.\\1', '^net\\.t_embedder\\.1\\.linear_1\\.(.*)$': 'time_embed.t_embedder.linear_1.\\1', '^net\\.t_embedder\\.1\\.linear_2\\.(.*)$': 'time_embed.t_embedder.linear_2.\\1', '^net\\.t_embedding_norm\\.(.*)$': 'time_embed.norm.\\1', '^net\\.crossattn_proj\\.0\\.weight$': 'crossattn_proj.0.weight', '^net\\.crossattn_proj\\.0\\.bias$': 'crossattn_proj.0.bias', '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_q.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_k.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.v_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_v.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.output_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_out.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_norm\\.weight$': 'transformer_blocks.\\1.attn1.norm_q.weight', '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_norm\\.weight$': 'transformer_blocks.\\1.attn1.norm_k.weight', '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_norm\\._extra_state$': 'transformer_blocks.\\1.attn1.norm_q._extra_state', '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_norm\\._extra_state$': 'transformer_blocks.\\1.attn1.norm_k._extra_state', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_q.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_k.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.v_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_v.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.output_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_out.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_norm\\.weight$': 'transformer_blocks.\\1.attn2.norm_q.weight', 
'^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_norm\\.weight$': 'transformer_blocks.\\1.attn2.norm_k.weight', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_norm\\._extra_state$': 'transformer_blocks.\\1.attn2.norm_q._extra_state', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_norm\\._extra_state$': 'transformer_blocks.\\1.attn2.norm_k._extra_state', '^net\\.blocks\\.(\\d+)\\.mlp\\.layer1\\.(.*)$': 'transformer_blocks.\\1.mlp.fc_in.\\2', '^net\\.blocks\\.(\\d+)\\.mlp\\.layer2\\.(.*)$': 'transformer_blocks.\\1.mlp.fc_out.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_self_attn\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_self_attn.1.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_self_attn\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_self_attn.2.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_cross_attn\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_cross_attn.1.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_cross_attn\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_cross_attn.2.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_mlp\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_mlp.1.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_mlp\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_mlp.2.\\2', '^net\\.blocks\\.(\\d+)\\.layer_norm_self_attn\\._extra_state$': 'transformer_blocks.\\1.norm1.norm._extra_state', '^net\\.blocks\\.(\\d+)\\.layer_norm_cross_attn\\._extra_state$': 'transformer_blocks.\\1.norm2.norm._extra_state', '^net\\.blocks\\.(\\d+)\\.layer_norm_mlp\\._extra_state$': 'transformer_blocks.\\1.norm3.norm._extra_state', '^net\\.final_layer\\.linear\\.(.*)$': 'final_layer.proj_out.\\1', '^net\\.final_layer\\.adaln_modulation\\.1\\.(.*)$': 'final_layer.linear_1.\\1', '^net\\.final_layer\\.adaln_modulation\\.2\\.(.*)$': 'final_layer.linear_2.\\1'})(), reverse_param_names_mapping: dict = dict(), lora_param_names_mapping: dict = (lambda: {'^transformer_blocks\\.(\\d+)\\.attn1\\.to_q\\.(.*)$': 
'transformer_blocks.\\1.attn1.to_q.\\2', '^transformer_blocks\\.(\\d+)\\.attn1\\.to_k\\.(.*)$': 'transformer_blocks.\\1.attn1.to_k.\\2', '^transformer_blocks\\.(\\d+)\\.attn1\\.to_v\\.(.*)$': 'transformer_blocks.\\1.attn1.to_v.\\2', '^transformer_blocks\\.(\\d+)\\.attn1\\.to_out\\.(.*)$': 'transformer_blocks.\\1.attn1.to_out.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_q\\.(.*)$': 'transformer_blocks.\\1.attn2.to_q.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_k\\.(.*)$': 'transformer_blocks.\\1.attn2.to_k.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_v\\.(.*)$': 'transformer_blocks.\\1.attn2.to_v.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_out\\.(.*)$': 'transformer_blocks.\\1.attn2.to_out.\\2', '^transformer_blocks\\.(\\d+)\\.mlp\\.(.*)$': 'transformer_blocks.\\1.mlp.\\2'})(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (SLIDING_TILE_ATTN, SAGE_ATTN, FLASH_ATTN, TORCH_SDPA, VIDEO_SPARSE_ATTN, VMOBA_ATTN, SAGE_ATTN_THREE), hidden_size: int = 0, num_attention_heads: int = 16, num_channels_latents: int = 0, exclude_lora_layers: list[str] = (lambda: ['embedder'])(), boundary_ratio: float | None = None, in_channels: int = 16, out_channels: int = 16, attention_head_dim: int = 128, num_layers: int = 28, mlp_ratio: float = 4.0, text_embed_dim: int = 1024, adaln_lora_dim: int = 256, use_adaln_lora: bool = True, max_size: tuple[int, int, int] = (128, 240, 240), patch_size: tuple[int, int, int] = (1, 2, 2), rope_scale: tuple[float, float, float] = (1.0, 3.0, 3.0), concat_padding_mask: bool = True, extra_pos_embed_type: str | None = None, use_crossattn_projection: bool = False, crossattn_proj_in_channels: int = 100352, rope_enable_fps_modulation: bool = True, qk_norm: str = 'rms_norm', eps: float = 1e-06)

Bases: DiTArchConfig

Configuration for Cosmos 2.5 architecture (MiniTrainDIT).

fastvideo.configs.models.dits.cosmos2_5.Cosmos25VideoConfig dataclass
Cosmos25VideoConfig(arch_config: DiTArchConfig = Cosmos25ArchConfig(), prefix: str = 'Cosmos25', quant_config: QuantizationConfig | None = None)

Bases: DiTConfig

Configuration for Cosmos 2.5 video generation model.

fastvideo.configs.models.encoders

Classes

fastvideo.configs.models.encoders.T5LargeConfig dataclass
T5LargeConfig(arch_config: TextEncoderArchConfig = T5LargeArchConfig(), prefix: str = 't5', quant_config: QuantizationConfig | None = None, lora_config: Any | None = None)

Bases: TextEncoderConfig

Configuration for the T5 Large text-encoder model.

Modules

fastvideo.configs.models.encoders.t5
Classes
fastvideo.configs.models.encoders.t5.T5LargeArchConfig dataclass
T5LargeArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = (lambda: [('.qkv_proj', '.q', 'q'), ('.qkv_proj', '.k', 'k'), ('.qkv_proj', '.v', 'v')])(), architectures: list[str] = (lambda: [])(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (FLASH_ATTN, TORCH_SDPA), output_hidden_states: bool = False, use_return_dict: bool = True, vocab_size: int = 32128, hidden_size: int = 0, num_hidden_layers: int = 0, num_attention_heads: int = 0, pad_token_id: int = 0, eos_token_id: int = 1, text_len: int = 512, hidden_state_skip_layer: int = 0, decoder_start_token_id: int = 0, output_past: bool = True, scalable_attention: bool = True, tie_word_embeddings: bool = False, tokenizer_kwargs: dict[str, Any] = dict(), _fsdp_shard_conditions: list = (lambda: [_is_transformer_layer, _is_embeddings, _is_final_layernorm])(), d_model: int = 1024, d_kv: int = 128, d_ff: int = 65536, num_layers: int = 24, num_decoder_layers: int | None = 24, num_heads: int = 128, relative_attention_num_buckets: int = 32, relative_attention_max_distance: int = 128, dropout_rate: float = 0.1, layer_norm_epsilon: float = 1e-06, initializer_factor: float = 1.0, feed_forward_proj: str = 'relu', dense_act_fn: str = '', is_gated_act: bool = False, is_encoder_decoder: bool = True, use_cache: bool = True, classifier_dropout: float = 0.0, n_positions: int = 512, task_specific_params: dict | None = None)

Bases: T5ArchConfig

Architecture config with parameter values for the T5 Large model.

fastvideo.configs.models.encoders.t5.T5LargeConfig dataclass
T5LargeConfig(arch_config: TextEncoderArchConfig = T5LargeArchConfig(), prefix: str = 't5', quant_config: QuantizationConfig | None = None, lora_config: Any | None = None)

Bases: TextEncoderConfig

Configuration for the T5 Large text-encoder model.

fastvideo.configs.models.vaes

Modules

fastvideo.configs.models.vaes.base
Classes
fastvideo.configs.models.vaes.base.VAEConfig dataclass
VAEConfig(arch_config: VAEArchConfig = VAEArchConfig(), load_encoder: bool = True, load_decoder: bool = True, tile_sample_min_height: int = 256, tile_sample_min_width: int = 256, tile_sample_min_num_frames: int = 16, tile_sample_stride_height: int = 192, tile_sample_stride_width: int = 192, tile_sample_stride_num_frames: int = 12, blend_num_frames: int = 0, use_tiling: bool = True, use_temporal_tiling: bool = True, use_parallel_tiling: bool = True, use_temporal_scaling_frames: bool = True)

Bases: ModelConfig

Functions
fastvideo.configs.models.vaes.base.VAEConfig.add_cli_args staticmethod
add_cli_args(parser: Any, prefix: str = 'vae-config') -> Any

Add CLI arguments for VAEConfig fields

Source code in fastvideo/configs/models/vaes/base.py
@staticmethod
def add_cli_args(parser: Any, prefix: str = "vae-config") -> Any:
    """Add CLI arguments for VAEConfig fields.

    Args:
        parser: An argparse-style parser exposing ``add_argument``.
        prefix: Dashed prefix used to namespace the generated options.

    Returns:
        The same parser, to allow chaining.
    """
    # argparse dest names use underscores; compute the translation once.
    dest_prefix = prefix.replace('-', '_')

    # (option-name, kind, default, help) for every VAEConfig field,
    # in the original registration order so --help output is unchanged.
    arg_specs = [
        ("load-encoder", "bool", VAEConfig.load_encoder,
         "Whether to load the VAE encoder"),
        ("load-decoder", "bool", VAEConfig.load_decoder,
         "Whether to load the VAE decoder"),
        ("tile-sample-min-height", "int", VAEConfig.tile_sample_min_height,
         "Minimum height for VAE tile sampling"),
        ("tile-sample-min-width", "int", VAEConfig.tile_sample_min_width,
         "Minimum width for VAE tile sampling"),
        ("tile-sample-min-num-frames", "int",
         VAEConfig.tile_sample_min_num_frames,
         "Minimum number of frames for VAE tile sampling"),
        ("tile-sample-stride-height", "int",
         VAEConfig.tile_sample_stride_height,
         "Stride height for VAE tile sampling"),
        ("tile-sample-stride-width", "int",
         VAEConfig.tile_sample_stride_width,
         "Stride width for VAE tile sampling"),
        ("tile-sample-stride-num-frames", "int",
         VAEConfig.tile_sample_stride_num_frames,
         "Stride number of frames for VAE tile sampling"),
        ("blend-num-frames", "int", VAEConfig.blend_num_frames,
         "Number of frames to blend for VAE tile sampling"),
        ("use-tiling", "bool", VAEConfig.use_tiling,
         "Whether to use tiling for VAE"),
        ("use-temporal-tiling", "bool", VAEConfig.use_temporal_tiling,
         "Whether to use temporal tiling for VAE"),
        ("use-parallel-tiling", "bool", VAEConfig.use_parallel_tiling,
         "Whether to use parallel tiling for VAE"),
        # Fix: use_temporal_scaling_frames is a VAEConfig field but
        # previously had no corresponding CLI argument.
        ("use-temporal-scaling-frames", "bool",
         VAEConfig.use_temporal_scaling_frames,
         "Whether to use temporal scaling frames for VAE"),
    ]

    for name, kind, default, help_text in arg_specs:
        # Booleans go through StoreBoolean; everything else is an int.
        kwargs = {"action": StoreBoolean} if kind == "bool" else {"type": int}
        parser.add_argument(
            f"--{prefix}.{name}",
            dest=f"{dest_prefix}.{name.replace('-', '_')}",
            default=default,
            help=help_text,
            **kwargs,
        )

    return parser