models

Classes

fastvideo.configs.models.DiTConfig dataclass

DiTConfig(arch_config: DiTArchConfig = DiTArchConfig(), prefix: str = '', quant_config: QuantizationConfig | None = None)

Bases: ModelConfig

Functions

fastvideo.configs.models.DiTConfig.add_cli_args staticmethod
add_cli_args(parser: Any, prefix: str = 'dit-config') -> Any

Add CLI arguments for DiTConfig fields

Source code in fastvideo/configs/models/dits/base.py
@staticmethod
def add_cli_args(parser: Any, prefix: str = "dit-config") -> Any:
    """Add CLI arguments for DiTConfig fields"""
    parser.add_argument(
        f"--{prefix}.prefix",
        type=str,
        dest=f"{prefix.replace('-', '_')}.prefix",
        default=DiTConfig.prefix,
        help="Prefix for the DiT model",
    )

    parser.add_argument(
        f"--{prefix}.quant-config",
        type=str,
        dest=f"{prefix.replace('-', '_')}.quant_config",
        default=None,
        help="Quantization configuration for the DiT model",
    )

    return parser
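
A minimal usage sketch, assuming a plain argparse.ArgumentParser (FastVideo's CLI may wrap this in its own parser class) and that fastvideo is importable. Because each dest contains a dot, parsed values are read back with getattr() rather than plain attribute access:

import argparse

from fastvideo.configs.models import DiTConfig

parser = argparse.ArgumentParser()
parser = DiTConfig.add_cli_args(parser, prefix="dit-config")

args = parser.parse_args(["--dit-config.prefix", "transformer"])
print(getattr(args, "dit_config.prefix"))  # -> "transformer"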

fastvideo.configs.models.VAEConfig dataclass

VAEConfig(arch_config: VAEArchConfig = VAEArchConfig(), load_encoder: bool = True, load_decoder: bool = True, tile_sample_min_height: int = 256, tile_sample_min_width: int = 256, tile_sample_min_num_frames: int = 16, tile_sample_stride_height: int = 192, tile_sample_stride_width: int = 192, tile_sample_stride_num_frames: int = 12, blend_num_frames: int = 0, use_tiling: bool = True, use_temporal_tiling: bool = True, use_parallel_tiling: bool = True, use_temporal_scaling_frames: bool = True)

Bases: ModelConfig

Functions

fastvideo.configs.models.VAEConfig.add_cli_args staticmethod
add_cli_args(parser: Any, prefix: str = 'vae-config') -> Any

Add CLI arguments for VAEConfig fields

Source code in fastvideo/configs/models/vaes/base.py
@staticmethod
def add_cli_args(parser: Any, prefix: str = "vae-config") -> Any:
    """Add CLI arguments for VAEConfig fields"""
    parser.add_argument(
        f"--{prefix}.load-encoder",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.load_encoder",
        default=VAEConfig.load_encoder,
        help="Whether to load the VAE encoder",
    )
    parser.add_argument(
        f"--{prefix}.load-decoder",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.load_decoder",
        default=VAEConfig.load_decoder,
        help="Whether to load the VAE decoder",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-min-height",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_min_height",
        default=VAEConfig.tile_sample_min_height,
        help="Minimum height for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-min-width",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_min_width",
        default=VAEConfig.tile_sample_min_width,
        help="Minimum width for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-min-num-frames",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_min_num_frames",
        default=VAEConfig.tile_sample_min_num_frames,
        help="Minimum number of frames for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-stride-height",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_stride_height",
        default=VAEConfig.tile_sample_stride_height,
        help="Stride height for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-stride-width",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_stride_width",
        default=VAEConfig.tile_sample_stride_width,
        help="Stride width for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-stride-num-frames",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_stride_num_frames",
        default=VAEConfig.tile_sample_stride_num_frames,
        help="Stride number of frames for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.blend-num-frames",
        type=int,
        dest=f"{prefix.replace('-', '_')}.blend_num_frames",
        default=VAEConfig.blend_num_frames,
        help="Number of frames to blend for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.use-tiling",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.use_tiling",
        default=VAEConfig.use_tiling,
        help="Whether to use tiling for VAE",
    )
    parser.add_argument(
        f"--{prefix}.use-temporal-tiling",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.use_temporal_tiling",
        default=VAEConfig.use_temporal_tiling,
        help="Whether to use temporal tiling for VAE",
    )
    parser.add_argument(
        f"--{prefix}.use-parallel-tiling",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.use_parallel_tiling",
        default=VAEConfig.use_parallel_tiling,
        help="Whether to use parallel tiling for VAE",
    )

    return parser
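
Note that each default stride is smaller than the matching minimum tile size, so adjacent tiles overlap by tile_sample_min_* - tile_sample_stride_* (256 - 192 = 64 pixels here, and 16 - 12 = 4 frames), presumably so tile seams can be blended. A usage sketch mirroring the DiTConfig example above (StoreBoolean is FastVideo's custom argparse action backing the boolean flags):

import argparse

from fastvideo.configs.models import VAEConfig

parser = argparse.ArgumentParser()
parser = VAEConfig.add_cli_args(parser, prefix="vae-config")

args = parser.parse_args([
    "--vae-config.tile-sample-min-height", "384",
    "--vae-config.tile-sample-stride-height", "320",
])
print(getattr(args, "vae_config.tile_sample_min_height"))    # -> 384
print(getattr(args, "vae_config.tile_sample_stride_height")) # -> 320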

Modules

fastvideo.configs.models.dits

Classes

fastvideo.configs.models.dits.Cosmos25VideoConfig dataclass
Cosmos25VideoConfig(arch_config: DiTArchConfig = Cosmos25ArchConfig(), prefix: str = 'Cosmos25', quant_config: QuantizationConfig | None = None)

Bases: DiTConfig

Configuration for Cosmos 2.5 video generation model.
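
A short sketch instantiating the config with its defaults; the printed values come from the Cosmos25ArchConfig signature documented below:

from fastvideo.configs.models.dits import Cosmos25VideoConfig

config = Cosmos25VideoConfig()
print(config.prefix)                          # 'Cosmos25'
print(config.arch_config.num_layers)          # 28
print(config.arch_config.attention_head_dim)  # 128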

fastvideo.configs.models.dits.LongCatVideoConfig dataclass
LongCatVideoConfig(arch_config: DiTArchConfig = LongCatVideoArchConfig(), prefix: str = 'longcat', quant_config: QuantizationConfig | None = None)

Bases: DiTConfig

Main configuration for LongCat Video DiT.

Modules

fastvideo.configs.models.dits.base
Classes
fastvideo.configs.models.dits.base.DiTConfig dataclass
DiTConfig(arch_config: DiTArchConfig = DiTArchConfig(), prefix: str = '', quant_config: QuantizationConfig | None = None)

Bases: ModelConfig

Functions
fastvideo.configs.models.dits.base.DiTConfig.add_cli_args staticmethod
add_cli_args(parser: Any, prefix: str = 'dit-config') -> Any

Add CLI arguments for DiTConfig fields

Source code in fastvideo/configs/models/dits/base.py
@staticmethod
def add_cli_args(parser: Any, prefix: str = "dit-config") -> Any:
    """Add CLI arguments for DiTConfig fields"""
    parser.add_argument(
        f"--{prefix}.prefix",
        type=str,
        dest=f"{prefix.replace('-', '_')}.prefix",
        default=DiTConfig.prefix,
        help="Prefix for the DiT model",
    )

    parser.add_argument(
        f"--{prefix}.quant-config",
        type=str,
        dest=f"{prefix.replace('-', '_')}.quant_config",
        default=None,
        help="Quantization configuration for the DiT model",
    )

    return parser

fastvideo.configs.models.dits.cosmos2_5
Classes
fastvideo.configs.models.dits.cosmos2_5.Cosmos25ArchConfig dataclass
Cosmos25ArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), _fsdp_shard_conditions: list = (lambda: [is_transformer_blocks])(), _compile_conditions: list = list(), param_names_mapping: dict = (lambda: {'^net\\.x_embedder\\.proj\\.1\\.(.*)$': 'patch_embed.proj.\\1', '^net\\.t_embedder\\.1\\.linear_1\\.(.*)$': 'time_embed.t_embedder.linear_1.\\1', '^net\\.t_embedder\\.1\\.linear_2\\.(.*)$': 'time_embed.t_embedder.linear_2.\\1', '^net\\.t_embedding_norm\\.(.*)$': 'time_embed.norm.\\1', '^net\\.crossattn_proj\\.0\\.weight$': 'crossattn_proj.0.weight', '^net\\.crossattn_proj\\.0\\.bias$': 'crossattn_proj.0.bias', '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_q.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_k.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.v_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_v.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.output_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_out.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_norm\\.weight$': 'transformer_blocks.\\1.attn1.norm_q.weight', '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_norm\\.weight$': 'transformer_blocks.\\1.attn1.norm_k.weight', '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_norm\\._extra_state$': 'transformer_blocks.\\1.attn1.norm_q._extra_state', '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_norm\\._extra_state$': 'transformer_blocks.\\1.attn1.norm_k._extra_state', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_q.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_k.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.v_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_v.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.output_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_out.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_norm\\.weight$': 'transformer_blocks.\\1.attn2.norm_q.weight', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_norm\\.weight$': 'transformer_blocks.\\1.attn2.norm_k.weight', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_norm\\._extra_state$': 'transformer_blocks.\\1.attn2.norm_q._extra_state', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_norm\\._extra_state$': 'transformer_blocks.\\1.attn2.norm_k._extra_state', '^net\\.blocks\\.(\\d+)\\.mlp\\.layer1\\.(.*)$': 'transformer_blocks.\\1.mlp.fc_in.\\2', '^net\\.blocks\\.(\\d+)\\.mlp\\.layer2\\.(.*)$': 'transformer_blocks.\\1.mlp.fc_out.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_self_attn\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_self_attn.1.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_self_attn\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_self_attn.2.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_cross_attn\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_cross_attn.1.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_cross_attn\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_cross_attn.2.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_mlp\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_mlp.1.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_mlp\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_mlp.2.\\2', '^net\\.blocks\\.(\\d+)\\.layer_norm_self_attn\\._extra_state$': 'transformer_blocks.\\1.norm1.norm._extra_state', '^net\\.blocks\\.(\\d+)\\.layer_norm_cross_attn\\._extra_state$': 'transformer_blocks.\\1.norm2.norm._extra_state', '^net\\.blocks\\.(\\d+)\\.layer_norm_mlp\\._extra_state$': 'transformer_blocks.\\1.norm3.norm._extra_state', '^net\\.final_layer\\.linear\\.(.*)$': 'final_layer.proj_out.\\1', '^net\\.final_layer\\.adaln_modulation\\.1\\.(.*)$': 'final_layer.linear_1.\\1', '^net\\.final_layer\\.adaln_modulation\\.2\\.(.*)$': 'final_layer.linear_2.\\1'})(), reverse_param_names_mapping: dict = dict(), lora_param_names_mapping: dict = (lambda: {'^transformer_blocks\\.(\\d+)\\.attn1\\.to_q\\.(.*)$': 'transformer_blocks.\\1.attn1.to_q.\\2', '^transformer_blocks\\.(\\d+)\\.attn1\\.to_k\\.(.*)$': 'transformer_blocks.\\1.attn1.to_k.\\2', '^transformer_blocks\\.(\\d+)\\.attn1\\.to_v\\.(.*)$': 'transformer_blocks.\\1.attn1.to_v.\\2', '^transformer_blocks\\.(\\d+)\\.attn1\\.to_out\\.(.*)$': 'transformer_blocks.\\1.attn1.to_out.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_q\\.(.*)$': 'transformer_blocks.\\1.attn2.to_q.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_k\\.(.*)$': 'transformer_blocks.\\1.attn2.to_k.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_v\\.(.*)$': 'transformer_blocks.\\1.attn2.to_v.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_out\\.(.*)$': 'transformer_blocks.\\1.attn2.to_out.\\2', '^transformer_blocks\\.(\\d+)\\.mlp\\.(.*)$': 'transformer_blocks.\\1.mlp.\\2'})(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (SLIDING_TILE_ATTN, SAGE_ATTN, FLASH_ATTN, TORCH_SDPA, VIDEO_SPARSE_ATTN, VMOBA_ATTN, SAGE_ATTN_THREE, SLA_ATTN, SAGE_SLA_ATTN), hidden_size: int = 0, num_attention_heads: int = 16, num_channels_latents: int = 0, in_channels: int = 16, out_channels: int = 16, exclude_lora_layers: list[str] = (lambda: ['embedder'])(), boundary_ratio: float | None = None, attention_head_dim: int = 128, num_layers: int = 28, mlp_ratio: float = 4.0, text_embed_dim: int = 1024, adaln_lora_dim: int = 256, use_adaln_lora: bool = True, max_size: tuple[int, int, int] = (128, 240, 240), patch_size: tuple[int, int, int] = (1, 2, 2), rope_scale: tuple[float, float, float] = (1.0, 3.0, 3.0), concat_padding_mask: bool = True, extra_pos_embed_type: str | None = None, use_crossattn_projection: bool = False, crossattn_proj_in_channels: int = 100352, rope_enable_fps_modulation: bool = True, qk_norm: str = 'rms_norm', eps: float = 1e-06)

Bases: DiTArchConfig

Configuration for Cosmos 2.5 architecture (MiniTrainDIT).
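
The param_names_mapping above translates official checkpoint parameter names into FastVideo's module tree. A self-contained sketch of how one entry rewrites a key (the loader that actually applies these regexes is not shown on this page):

import re

# One entry from param_names_mapping, written as a raw-string regex.
pattern = r"^net\.blocks\.(\d+)\.self_attn\.q_proj\.(.*)$"
replacement = r"transformer_blocks.\1.attn1.to_q.\2"

official_key = "net.blocks.3.self_attn.q_proj.weight"
print(re.sub(pattern, replacement, official_key))
# -> transformer_blocks.3.attn1.to_q.weight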

fastvideo.configs.models.dits.cosmos2_5.Cosmos25VideoConfig dataclass
Cosmos25VideoConfig(arch_config: DiTArchConfig = Cosmos25ArchConfig(), prefix: str = 'Cosmos25', quant_config: QuantizationConfig | None = None)

Bases: DiTConfig

Configuration for Cosmos 2.5 video generation model.

fastvideo.configs.models.dits.longcat

LongCat Video DiT configuration for native FastVideo implementation.

Classes
fastvideo.configs.models.dits.longcat.LongCatVideoArchConfig dataclass
LongCatVideoArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), _fsdp_shard_conditions: list = (lambda: [is_longcat_blocks])(), _compile_conditions: list = (lambda: [is_longcat_blocks])(), param_names_mapping: dict = (lambda: {'^x_embedder\\.(.*)$': 'patch_embed.\\1', '^t_embedder\\.mlp\\.0\\.(.*)$': 'time_embedder.linear_1.\\1', '^t_embedder\\.mlp\\.2\\.(.*)$': 'time_embedder.linear_2.\\1', '^y_embedder\\.y_proj\\.0\\.(.*)$': 'caption_embedder.linear_1.\\1', '^y_embedder\\.y_proj\\.2\\.(.*)$': 'caption_embedder.linear_2.\\1', '^blocks\\.(\\d+)\\.adaLN_modulation\\.1\\.(.*)$': 'blocks.\\1.adaln_linear_1.\\2', '^blocks\\.(\\d+)\\.mod_norm_attn\\.(.*)$': 'blocks.\\1.norm_attn.\\2', '^blocks\\.(\\d+)\\.mod_norm_ffn\\.(.*)$': 'blocks.\\1.norm_ffn.\\2', '^blocks\\.(\\d+)\\.pre_crs_attn_norm\\.(.*)$': 'blocks.\\1.norm_cross.\\2', '^blocks\\.(\\d+)\\.attn\\.qkv\\.(.*)$': 'blocks.\\1.self_attn.qkv_fused.\\2', '^blocks\\.(\\d+)\\.attn\\.proj\\.(.*)$': 'blocks.\\1.self_attn.to_out.\\2', '^blocks\\.(\\d+)\\.attn\\.q_norm\\.(.*)$': 'blocks.\\1.self_attn.q_norm.\\2', '^blocks\\.(\\d+)\\.attn\\.k_norm\\.(.*)$': 'blocks.\\1.self_attn.k_norm.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.q_linear\\.(.*)$': 'blocks.\\1.cross_attn.to_q.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.kv_linear\\.(.*)$': 'blocks.\\1.cross_attn.kv_fused.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.proj\\.(.*)$': 'blocks.\\1.cross_attn.to_out.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.q_norm\\.(.*)$': 'blocks.\\1.cross_attn.q_norm.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.k_norm\\.(.*)$': 'blocks.\\1.cross_attn.k_norm.\\2', '^blocks\\.(\\d+)\\.ffn\\.w1\\.(.*)$': 'blocks.\\1.ffn.w1.\\2', '^blocks\\.(\\d+)\\.ffn\\.w2\\.(.*)$': 'blocks.\\1.ffn.w2.\\2', '^blocks\\.(\\d+)\\.ffn\\.w3\\.(.*)$': 'blocks.\\1.ffn.w3.\\2', '^final_layer\\.adaLN_modulation\\.1\\.(.*)$': 'final_layer.adaln_linear.\\1', '^final_layer\\.norm_final\\.(.*)$': 'final_layer.norm.\\1', '^final_layer\\.linear\\.(.*)$': 'final_layer.proj.\\1'})(), reverse_param_names_mapping: dict = (lambda: {})(), lora_param_names_mapping: dict = (lambda: {})(), _supported_attention_backends: tuple = (lambda: (FLASH_ATTN, TORCH_SDPA))(), hidden_size: int = 4096, num_attention_heads: int = 32, num_channels_latents: int = 16, in_channels: int = 16, out_channels: int = 16, exclude_lora_layers: list[str] = (lambda: [])(), boundary_ratio: float | None = None, depth: int = 48, attention_head_dim: int = 128, patch_size: tuple[int, int, int] = (1, 2, 2), caption_channels: int = 4096, adaln_tembed_dim: int = 512, frequency_embedding_size: int = 256, mlp_ratio: int = 4, text_tokens_zero_pad: bool = True, enable_bsa: bool = False, bsa_params: dict | None = (lambda: {'sparsity': 0.9375, 'cdf_threshold': None, 'chunk_3d_shape_q': [4, 4, 4], 'chunk_3d_shape_k': [4, 4, 4]})())

Bases: DiTArchConfig

Architecture configuration for native LongCat Video DiT.
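
A quick consistency check on the defaults, assuming the import path shown above: hidden_size equals num_attention_heads * attention_head_dim (32 * 128 = 4096), and bsa_params carries the block-sparse attention defaults, presumably consulted when enable_bsa is True:

from fastvideo.configs.models.dits.longcat import LongCatVideoArchConfig

arch = LongCatVideoArchConfig()
assert arch.hidden_size == arch.num_attention_heads * arch.attention_head_dim
print(arch.bsa_params["sparsity"])  # 0.9375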

fastvideo.configs.models.dits.longcat.LongCatVideoConfig dataclass
LongCatVideoConfig(arch_config: DiTArchConfig = LongCatVideoArchConfig(), prefix: str = 'longcat', quant_config: QuantizationConfig | None = None)

Bases: DiTConfig

Main configuration for LongCat Video DiT.

Functions
fastvideo.configs.models.dits.longcat.is_longcat_blocks
is_longcat_blocks(n: str, m) -> bool

FSDP shard condition for LongCat transformer blocks.

Source code in fastvideo/configs/models/dits/longcat.py
def is_longcat_blocks(n: str, m) -> bool:
    """FSDP shard condition for LongCat transformer blocks."""
    return "blocks" in n and str.isdigit(n.split(".")[-1])

fastvideo.configs.models.encoders

Classes

fastvideo.configs.models.encoders.Reason1ArchConfig dataclass
Reason1ArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), architectures: list[str] = (lambda: ['Qwen2_5_VLForConditionalGeneration'])(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (FLASH_ATTN, TORCH_SDPA), output_hidden_states: bool = True, use_return_dict: bool = True, vocab_size: int = 152064, hidden_size: int = 3584, num_hidden_layers: int = 28, num_attention_heads: int = 28, pad_token_id: int = 151643, eos_token_id: int = 151645, text_len: int = 512, hidden_state_skip_layer: int = 0, decoder_start_token_id: int = 0, output_past: bool = True, scalable_attention: bool = True, tie_word_embeddings: bool = False, tokenizer_kwargs: dict[str, Any] = dict(), _fsdp_shard_conditions: list = (lambda: [])(), model_type: str = 'qwen2_5_vl', num_key_value_heads: int = 4, intermediate_size: int = 18944, bos_token_id: int = 151643, image_token_id: int = 151655, video_token_id: int = 151656, vision_token_id: int = 151654, vision_start_token_id: int = 151652, vision_end_token_id: int = 151653, vision_config: dict[str, Any] | None = None, rope_theta: float = 1000000.0, rope_scaling: dict[str, Any] | None = (lambda: {'type': 'mrope', 'mrope_section': [16, 24, 24]})(), max_position_embeddings: int = 128000, max_window_layers: int = 28, embedding_concat_strategy: str = 'mean_pooling', n_layers_per_group: int = 5, num_embedding_padding_tokens: int = 512, attention_dropout: float = 0.0, hidden_act: str = 'silu', initializer_range: float = 0.02, rms_norm_eps: float = 1e-06, use_sliding_window: bool = False, sliding_window: int = 32768, use_cache: bool = False, torch_dtype: str = 'bfloat16', _attn_implementation: str = 'flash_attention_2')

Bases: TextEncoderArchConfig

Architecture settings (defaults match Qwen2.5-VL-7B-Instruct).
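
A small sanity check on these defaults: the per-head dimension is 3584 / 28 = 128, and the mRoPE sections cover the 64 rotary pairs per head (16 + 24 + 24 = 64 = 128 / 2). A sketch, assuming fastvideo is importable:

from fastvideo.configs.models.encoders import Reason1ArchConfig

arch = Reason1ArchConfig()
head_dim = arch.hidden_size // arch.num_attention_heads  # 128
assert sum(arch.rope_scaling["mrope_section"]) == head_dim // 2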

fastvideo.configs.models.encoders.Reason1Config dataclass
Reason1Config(arch_config: Reason1ArchConfig = Reason1ArchConfig(), prefix: str = '', quant_config: QuantizationConfig | None = None, lora_config: Any | None = None, is_chat_model: bool = False, tokenizer_type: str = 'Qwen/Qwen2.5-VL-7B-Instruct')

Bases: TextEncoderConfig

Reason1 text encoder config.

fastvideo.configs.models.encoders.T5LargeConfig dataclass
T5LargeConfig(arch_config: TextEncoderArchConfig = T5LargeArchConfig(), prefix: str = 't5', quant_config: QuantizationConfig | None = None, lora_config: Any | None = None, is_chat_model: bool = False)

Bases: TextEncoderConfig

T5 Large text encoder configuration.

Modules

fastvideo.configs.models.encoders.reason1

Config for Reason1 (Qwen2.5-VL) text encoder.

Classes
fastvideo.configs.models.encoders.reason1.Reason1ArchConfig dataclass
Reason1ArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), architectures: list[str] = (lambda: ['Qwen2_5_VLForConditionalGeneration'])(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (FLASH_ATTN, TORCH_SDPA), output_hidden_states: bool = True, use_return_dict: bool = True, vocab_size: int = 152064, hidden_size: int = 3584, num_hidden_layers: int = 28, num_attention_heads: int = 28, pad_token_id: int = 151643, eos_token_id: int = 151645, text_len: int = 512, hidden_state_skip_layer: int = 0, decoder_start_token_id: int = 0, output_past: bool = True, scalable_attention: bool = True, tie_word_embeddings: bool = False, tokenizer_kwargs: dict[str, Any] = dict(), _fsdp_shard_conditions: list = (lambda: [])(), model_type: str = 'qwen2_5_vl', num_key_value_heads: int = 4, intermediate_size: int = 18944, bos_token_id: int = 151643, image_token_id: int = 151655, video_token_id: int = 151656, vision_token_id: int = 151654, vision_start_token_id: int = 151652, vision_end_token_id: int = 151653, vision_config: dict[str, Any] | None = None, rope_theta: float = 1000000.0, rope_scaling: dict[str, Any] | None = (lambda: {'type': 'mrope', 'mrope_section': [16, 24, 24]})(), max_position_embeddings: int = 128000, max_window_layers: int = 28, embedding_concat_strategy: str = 'mean_pooling', n_layers_per_group: int = 5, num_embedding_padding_tokens: int = 512, attention_dropout: float = 0.0, hidden_act: str = 'silu', initializer_range: float = 0.02, rms_norm_eps: float = 1e-06, use_sliding_window: bool = False, sliding_window: int = 32768, use_cache: bool = False, torch_dtype: str = 'bfloat16', _attn_implementation: str = 'flash_attention_2')

Bases: TextEncoderArchConfig

Architecture settings (defaults match Qwen2.5-VL-7B-Instruct).

fastvideo.configs.models.encoders.reason1.Reason1Config dataclass
Reason1Config(arch_config: Reason1ArchConfig = Reason1ArchConfig(), prefix: str = '', quant_config: QuantizationConfig | None = None, lora_config: Any | None = None, is_chat_model: bool = False, tokenizer_type: str = 'Qwen/Qwen2.5-VL-7B-Instruct')

Bases: TextEncoderConfig

Reason1 text encoder config.

fastvideo.configs.models.encoders.t5
Classes
fastvideo.configs.models.encoders.t5.T5LargeArchConfig dataclass
T5LargeArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = (lambda: [('.qkv_proj', '.q', 'q'), ('.qkv_proj', '.k', 'k'), ('.qkv_proj', '.v', 'v')])(), architectures: list[str] = (lambda: [])(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (FLASH_ATTN, TORCH_SDPA), output_hidden_states: bool = False, use_return_dict: bool = True, vocab_size: int = 32128, hidden_size: int = 0, num_hidden_layers: int = 0, num_attention_heads: int = 0, pad_token_id: int = 0, eos_token_id: int = 1, text_len: int = 512, hidden_state_skip_layer: int = 0, decoder_start_token_id: int = 0, output_past: bool = True, scalable_attention: bool = True, tie_word_embeddings: bool = False, tokenizer_kwargs: dict[str, Any] = dict(), _fsdp_shard_conditions: list = (lambda: [_is_transformer_layer, _is_embeddings, _is_final_layernorm])(), d_model: int = 1024, d_kv: int = 128, d_ff: int = 65536, num_layers: int = 24, num_decoder_layers: int | None = 24, num_heads: int = 128, relative_attention_num_buckets: int = 32, relative_attention_max_distance: int = 128, dropout_rate: float = 0.1, layer_norm_epsilon: float = 1e-06, initializer_factor: float = 1.0, feed_forward_proj: str = 'relu', dense_act_fn: str = '', is_gated_act: bool = False, is_encoder_decoder: bool = True, use_cache: bool = True, classifier_dropout: float = 0.0, dtype: str | None = None, gradient_checkpointing: bool = False, n_positions: int = 512, task_specific_params: dict | None = None)

Bases: T5ArchConfig

T5 Large architecture config with model-specific parameter defaults.
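
The stacked_params_mapping tuples declare that checkpoint weights named '.q', '.k' and '.v' load into one fused '.qkv_proj' parameter under the given shard id. An illustrative rewrite only; the real shard-loading logic lives in the model loader, not here:

stacked_params_mapping = [
    (".qkv_proj", ".q", "q"),
    (".qkv_proj", ".k", "k"),
    (".qkv_proj", ".v", "v"),
]

name = "encoder.block.0.layer.0.SelfAttention.q.weight"
for fused_name, shard_name, shard_id in stacked_params_mapping:
    if shard_name + "." in name:
        print(name.replace(shard_name + ".", fused_name + "."), shard_id)
        # -> encoder.block.0.layer.0.SelfAttention.qkv_proj.weight q
        break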

fastvideo.configs.models.encoders.t5.T5LargeConfig dataclass
T5LargeConfig(arch_config: TextEncoderArchConfig = T5LargeArchConfig(), prefix: str = 't5', quant_config: QuantizationConfig | None = None, lora_config: Any | None = None, is_chat_model: bool = False)

Bases: TextEncoderConfig

T5 Large text encoder configuration.

fastvideo.configs.models.vaes

Classes

fastvideo.configs.models.vaes.Cosmos25VAEConfig dataclass
Cosmos25VAEConfig(arch_config: Cosmos25VAEArchConfig = Cosmos25VAEArchConfig(), load_encoder: bool = True, load_decoder: bool = True, tile_sample_min_height: int = 256, tile_sample_min_width: int = 256, tile_sample_min_num_frames: int = 16, tile_sample_stride_height: int = 192, tile_sample_stride_width: int = 192, tile_sample_stride_num_frames: int = 12, blend_num_frames: int = 0, use_tiling: bool = False, use_temporal_tiling: bool = False, use_parallel_tiling: bool = False, use_temporal_scaling_frames: bool = True, use_feature_cache: bool = True)

Bases: VAEConfig

Cosmos 2.5 VAE config.
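
A hypothetical sketch of per-channel latent normalization using the latents_mean / latents_std defaults from Cosmos25VAEArchConfig (one value per z_dim = 16 latent channel); where FastVideo actually applies this normalization is not shown on this page:

import torch

from fastvideo.configs.models.vaes import Cosmos25VAEConfig

arch = Cosmos25VAEConfig().arch_config
mean = torch.tensor(arch.latents_mean).view(1, -1, 1, 1, 1)
std = torch.tensor(arch.latents_std).view(1, -1, 1, 1, 1)

latents = torch.randn(1, arch.z_dim, 4, 32, 32)  # (B, C, T, H, W)
normalized = (latents - mean) / std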

Modules

fastvideo.configs.models.vaes.base
Classes
fastvideo.configs.models.vaes.base.VAEConfig dataclass
VAEConfig(arch_config: VAEArchConfig = VAEArchConfig(), load_encoder: bool = True, load_decoder: bool = True, tile_sample_min_height: int = 256, tile_sample_min_width: int = 256, tile_sample_min_num_frames: int = 16, tile_sample_stride_height: int = 192, tile_sample_stride_width: int = 192, tile_sample_stride_num_frames: int = 12, blend_num_frames: int = 0, use_tiling: bool = True, use_temporal_tiling: bool = True, use_parallel_tiling: bool = True, use_temporal_scaling_frames: bool = True)

Bases: ModelConfig

Functions
fastvideo.configs.models.vaes.base.VAEConfig.add_cli_args staticmethod
add_cli_args(parser: Any, prefix: str = 'vae-config') -> Any

Add CLI arguments for VAEConfig fields

Source code in fastvideo/configs/models/vaes/base.py
@staticmethod
def add_cli_args(parser: Any, prefix: str = "vae-config") -> Any:
    """Add CLI arguments for VAEConfig fields"""
    parser.add_argument(
        f"--{prefix}.load-encoder",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.load_encoder",
        default=VAEConfig.load_encoder,
        help="Whether to load the VAE encoder",
    )
    parser.add_argument(
        f"--{prefix}.load-decoder",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.load_decoder",
        default=VAEConfig.load_decoder,
        help="Whether to load the VAE decoder",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-min-height",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_min_height",
        default=VAEConfig.tile_sample_min_height,
        help="Minimum height for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-min-width",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_min_width",
        default=VAEConfig.tile_sample_min_width,
        help="Minimum width for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-min-num-frames",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_min_num_frames",
        default=VAEConfig.tile_sample_min_num_frames,
        help="Minimum number of frames for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-stride-height",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_stride_height",
        default=VAEConfig.tile_sample_stride_height,
        help="Stride height for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-stride-width",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_stride_width",
        default=VAEConfig.tile_sample_stride_width,
        help="Stride width for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-stride-num-frames",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_stride_num_frames",
        default=VAEConfig.tile_sample_stride_num_frames,
        help="Stride number of frames for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.blend-num-frames",
        type=int,
        dest=f"{prefix.replace('-', '_')}.blend_num_frames",
        default=VAEConfig.blend_num_frames,
        help="Number of frames to blend for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.use-tiling",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.use_tiling",
        default=VAEConfig.use_tiling,
        help="Whether to use tiling for VAE",
    )
    parser.add_argument(
        f"--{prefix}.use-temporal-tiling",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.use_temporal_tiling",
        default=VAEConfig.use_temporal_tiling,
        help="Whether to use temporal tiling for VAE",
    )
    parser.add_argument(
        f"--{prefix}.use-parallel-tiling",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.use_parallel_tiling",
        default=VAEConfig.use_parallel_tiling,
        help="Whether to use parallel tiling for VAE",
    )

    return parser

fastvideo.configs.models.vaes.cosmos2_5vae

Cosmos 2.5 (Wan2.1-style) VAE config and checkpoint-key mapping.

Classes
fastvideo.configs.models.vaes.cosmos2_5vae.Cosmos25VAEArchConfig dataclass
Cosmos25VAEArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), scaling_factor: float | Tensor = 0, temporal_compression_ratio: int = 4, spatial_compression_ratio: int = 8, _name_or_path: str = '', base_dim: int = 96, decoder_base_dim: int | None = None, z_dim: int = 16, dim_mult: tuple[int, ...] = (1, 2, 4, 4), num_res_blocks: int = 2, attn_scales: tuple[float, ...] = (), temperal_downsample: tuple[bool, ...] = (False, True, True), dropout: float = 0.0, is_residual: bool = False, in_channels: int = 3, out_channels: int = 3, patch_size: int | None = None, scale_factor_temporal: int = 4, scale_factor_spatial: int = 8, clip_output: bool = True, latents_mean: tuple[float, ...] = (-0.7571, -0.7089, -0.9113, 0.1075, -0.1745, 0.9653, -0.1517, 1.5508, 0.4134, -0.0715, 0.5517, -0.3632, -0.1922, -0.9497, 0.2503, -0.2921), latents_std: tuple[float, ...] = (2.8184, 1.4541, 2.3275, 2.6558, 1.2196, 1.7708, 2.6052, 2.0743, 3.2687, 2.1526, 2.8652, 1.5579, 1.6382, 1.1253, 2.8251, 1.916), param_names_mapping: dict[str, str] = (lambda: {'^conv1\\.(.*)$': 'quant_conv.\\1', '^conv2\\.(.*)$': 'post_quant_conv.\\1', '^encoder\\.conv1\\.(.*)$': 'encoder.conv_in.\\1', '^decoder\\.conv1\\.(.*)$': 'decoder.conv_in.\\1', '^encoder\\.head\\.0\\.gamma$': 'encoder.norm_out.gamma', '^encoder\\.head\\.2\\.(.*)$': 'encoder.conv_out.\\1', '^decoder\\.head\\.0\\.gamma$': 'decoder.norm_out.gamma', '^decoder\\.head\\.2\\.(.*)$': 'decoder.conv_out.\\1'})())

Bases: VAEArchConfig

Functions
fastvideo.configs.models.vaes.cosmos2_5vae.Cosmos25VAEArchConfig.map_official_key staticmethod
map_official_key(key: str) -> str | None

Map a single official checkpoint key into FastVideo key space.

Source code in fastvideo/configs/models/vaes/cosmos2_5vae.py
@staticmethod
def map_official_key(key: str) -> str | None:
    """Map a single official checkpoint key into FastVideo key space."""

    def map_residual_subkey(prefix: str, sub: str) -> str | None:
        if re.match(r"^residual\.0\.gamma$", sub):
            return f"{prefix}.norm1.gamma"
        m = re.match(r"^residual\.2\.(weight|bias)$", sub)
        if m:
            return f"{prefix}.conv1.{m.group(1)}"
        if re.match(r"^residual\.3\.gamma$", sub):
            return f"{prefix}.norm2.gamma"
        m = re.match(r"^residual\.6\.(weight|bias)$", sub)
        if m:
            return f"{prefix}.conv2.{m.group(1)}"
        m = re.match(r"^shortcut\.(weight|bias)$", sub)
        if m:
            return f"{prefix}.conv_shortcut.{m.group(1)}"
        return None

    def map_attn_subkey(prefix: str, sub: str) -> str | None:
        if re.match(r"^norm\.gamma$", sub):
            return f"{prefix}.norm.gamma"
        m = re.match(r"^to_qkv\.(weight|bias)$", sub)
        if m:
            return f"{prefix}.to_qkv.{m.group(1)}"
        m = re.match(r"^proj\.(weight|bias)$", sub)
        if m:
            return f"{prefix}.proj.{m.group(1)}"
        return None

    def map_resample_subkey(prefix: str, sub: str) -> str | None:
        m = re.match(r"^resample\.1\.(weight|bias)$", sub)
        if m:
            return f"{prefix}.resample.1.{m.group(1)}"
        m = re.match(r"^time_conv\.(weight|bias)$", sub)
        if m:
            return f"{prefix}.time_conv.{m.group(1)}"
        return None

    m = re.match(r"^conv1\.(weight|bias)$", key)
    if m:
        return f"quant_conv.{m.group(1)}"
    m = re.match(r"^conv2\.(weight|bias)$", key)
    if m:
        return f"post_quant_conv.{m.group(1)}"
    m = re.match(r"^(encoder|decoder)\.conv1\.(weight|bias)$", key)
    if m:
        return f"{m.group(1)}.conv_in.{m.group(2)}"
    m = re.match(r"^(encoder|decoder)\.head\.0\.gamma$", key)
    if m:
        return f"{m.group(1)}.norm_out.gamma"
    m = re.match(r"^(encoder|decoder)\.head\.2\.(weight|bias)$", key)
    if m:
        return f"{m.group(1)}.conv_out.{m.group(2)}"

    m = re.match(r"^(encoder|decoder)\.middle\.0\.(.*)$", key)
    if m:
        return map_residual_subkey(f"{m.group(1)}.mid_block.resnets.0",
                                   m.group(2))
    m = re.match(r"^(encoder|decoder)\.middle\.1\.(.*)$", key)
    if m:
        return map_attn_subkey(f"{m.group(1)}.mid_block.attentions.0",
                               m.group(2))
    m = re.match(r"^(encoder|decoder)\.middle\.2\.(.*)$", key)
    if m:
        return map_residual_subkey(f"{m.group(1)}.mid_block.resnets.1",
                                   m.group(2))

    m = re.match(r"^encoder\.downsamples\.(\d+)\.(.*)$", key)
    if m:
        idx = int(m.group(1))
        sub = m.group(2)
        if sub.startswith("residual.") or sub.startswith("shortcut."):
            return map_residual_subkey(f"encoder.down_blocks.{idx}", sub)
        if sub.startswith("resample.") or sub.startswith("time_conv."):
            return map_resample_subkey(f"encoder.down_blocks.{idx}", sub)
        return None

    m = re.match(r"^decoder\.upsamples\.(\d+)\.(.*)$", key)
    if m:
        uidx = int(m.group(1))
        sub = m.group(2)

        if uidx in (0, 1, 2):
            block_i, res_i = 0, uidx
        elif uidx == 3:
            block_i, res_i = 0, None
        elif uidx in (4, 5, 6):
            block_i, res_i = 1, uidx - 4
        elif uidx == 7:
            block_i, res_i = 1, None
        elif uidx in (8, 9, 10):
            block_i, res_i = 2, uidx - 8
        elif uidx == 11:
            block_i, res_i = 2, None
        elif uidx in (12, 13, 14):
            block_i, res_i = 3, uidx - 12
        else:
            return None

        if res_i is None:
            return map_resample_subkey(
                f"decoder.up_blocks.{block_i}.upsamplers.0",
                sub,
            )

        return map_residual_subkey(
            f"decoder.up_blocks.{block_i}.resnets.{res_i}",
            sub,
        )

    return None
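
Usage sketch covering the three key families handled above; a None return means the key has no FastVideo counterpart and should be skipped:

from fastvideo.configs.models.vaes.cosmos2_5vae import Cosmos25VAEArchConfig

print(Cosmos25VAEArchConfig.map_official_key("conv1.weight"))
# -> quant_conv.weight
print(Cosmos25VAEArchConfig.map_official_key("decoder.upsamples.3.time_conv.weight"))
# -> decoder.up_blocks.0.upsamplers.0.time_conv.weight
print(Cosmos25VAEArchConfig.map_official_key("totally.unknown.key"))
# -> None
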
fastvideo.configs.models.vaes.cosmos2_5vae.Cosmos25VAEConfig dataclass
Cosmos25VAEConfig(arch_config: Cosmos25VAEArchConfig = Cosmos25VAEArchConfig(), load_encoder: bool = True, load_decoder: bool = True, tile_sample_min_height: int = 256, tile_sample_min_width: int = 256, tile_sample_min_num_frames: int = 16, tile_sample_stride_height: int = 192, tile_sample_stride_width: int = 192, tile_sample_stride_num_frames: int = 12, blend_num_frames: int = 0, use_tiling: bool = False, use_temporal_tiling: bool = False, use_parallel_tiling: bool = False, use_temporal_scaling_frames: bool = True, use_feature_cache: bool = True)

Bases: VAEConfig

Cosmos 2.5 VAE config.