fastvideo.configs.models.dits

Classes
fastvideo.configs.models.dits.Cosmos25VideoConfig
dataclass
Cosmos25VideoConfig(arch_config: DiTArchConfig = Cosmos25ArchConfig(), prefix: str = 'Cosmos25', quant_config: QuantizationConfig | None = None)
fastvideo.configs.models.dits.LongCatVideoConfig
dataclass
LongCatVideoConfig(arch_config: DiTArchConfig = LongCatVideoArchConfig(), prefix: str = 'longcat', quant_config: QuantizationConfig | None = None)
Modules
fastvideo.configs.models.dits.base
Classes
fastvideo.configs.models.dits.base.DiTConfig
dataclass
DiTConfig(arch_config: DiTArchConfig = DiTArchConfig(), prefix: str = '', quant_config: QuantizationConfig | None = None)
Bases: ModelConfig
Functions
fastvideo.configs.models.dits.base.DiTConfig.add_cli_args
staticmethod
Add CLI arguments for DiTConfig fields.
Source code in fastvideo/configs/models/dits/base.py
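A minimal sketch of exposing these fields on a command line. It assumes add_cli_args accepts a standard argparse.ArgumentParser and registers one flag per DiTConfig field; the actual signature and the flag names it adds are in the source file referenced above.

```python
# Hypothetical usage sketch: the add_cli_args signature and the generated flag
# names are assumptions -- check fastvideo/configs/models/dits/base.py.
import argparse

from fastvideo.configs.models.dits.base import DiTConfig

parser = argparse.ArgumentParser(description="DiT model config")
DiTConfig.add_cli_args(parser)   # assumed: registers one flag per DiTConfig field
args = parser.parse_args([])     # parse defaults; pass sys.argv[1:] in real use
print(vars(args))
```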
fastvideo.configs.models.dits.cosmos2_5
Classes
fastvideo.configs.models.dits.cosmos2_5.Cosmos25ArchConfig
dataclass
Cosmos25ArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), _fsdp_shard_conditions: list = (lambda: [is_transformer_blocks])(), _compile_conditions: list = list(), param_names_mapping: dict = (lambda: {'^net\\.x_embedder\\.proj\\.1\\.(.*)$': 'patch_embed.proj.\\1', '^net\\.t_embedder\\.1\\.linear_1\\.(.*)$': 'time_embed.t_embedder.linear_1.\\1', '^net\\.t_embedder\\.1\\.linear_2\\.(.*)$': 'time_embed.t_embedder.linear_2.\\1', '^net\\.t_embedding_norm\\.(.*)$': 'time_embed.norm.\\1', '^net\\.crossattn_proj\\.0\\.weight$': 'crossattn_proj.0.weight', '^net\\.crossattn_proj\\.0\\.bias$': 'crossattn_proj.0.bias', '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_q.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_k.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.v_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_v.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.output_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_out.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_norm\\.weight$': 'transformer_blocks.\\1.attn1.norm_q.weight', '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_norm\\.weight$': 'transformer_blocks.\\1.attn1.norm_k.weight', '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_norm\\._extra_state$': 'transformer_blocks.\\1.attn1.norm_q._extra_state', '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_norm\\._extra_state$': 'transformer_blocks.\\1.attn1.norm_k._extra_state', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_q.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_k.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.v_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_v.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.output_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_out.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_norm\\.weight$': 'transformer_blocks.\\1.attn2.norm_q.weight', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_norm\\.weight$': 'transformer_blocks.\\1.attn2.norm_k.weight', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_norm\\._extra_state$': 'transformer_blocks.\\1.attn2.norm_q._extra_state', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_norm\\._extra_state$': 'transformer_blocks.\\1.attn2.norm_k._extra_state', '^net\\.blocks\\.(\\d+)\\.mlp\\.layer1\\.(.*)$': 'transformer_blocks.\\1.mlp.fc_in.\\2', '^net\\.blocks\\.(\\d+)\\.mlp\\.layer2\\.(.*)$': 'transformer_blocks.\\1.mlp.fc_out.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_self_attn\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_self_attn.1.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_self_attn\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_self_attn.2.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_cross_attn\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_cross_attn.1.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_cross_attn\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_cross_attn.2.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_mlp\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_mlp.1.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_mlp\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_mlp.2.\\2', '^net\\.blocks\\.(\\d+)\\.layer_norm_self_attn\\._extra_state$': 'transformer_blocks.\\1.norm1.norm._extra_state', '^net\\.blocks\\.(\\d+)\\.layer_norm_cross_attn\\._extra_state$': 'transformer_blocks.\\1.norm2.norm._extra_state', '^net\\.blocks\\.(\\d+)\\.layer_norm_mlp\\._extra_state$': 'transformer_blocks.\\1.norm3.norm._extra_state', 
'^net\\.final_layer\\.linear\\.(.*)$': 'final_layer.proj_out.\\1', '^net\\.final_layer\\.adaln_modulation\\.1\\.(.*)$': 'final_layer.linear_1.\\1', '^net\\.final_layer\\.adaln_modulation\\.2\\.(.*)$': 'final_layer.linear_2.\\1'})(), reverse_param_names_mapping: dict = dict(), lora_param_names_mapping: dict = (lambda: {'^transformer_blocks\\.(\\d+)\\.attn1\\.to_q\\.(.*)$': 'transformer_blocks.\\1.attn1.to_q.\\2', '^transformer_blocks\\.(\\d+)\\.attn1\\.to_k\\.(.*)$': 'transformer_blocks.\\1.attn1.to_k.\\2', '^transformer_blocks\\.(\\d+)\\.attn1\\.to_v\\.(.*)$': 'transformer_blocks.\\1.attn1.to_v.\\2', '^transformer_blocks\\.(\\d+)\\.attn1\\.to_out\\.(.*)$': 'transformer_blocks.\\1.attn1.to_out.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_q\\.(.*)$': 'transformer_blocks.\\1.attn2.to_q.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_k\\.(.*)$': 'transformer_blocks.\\1.attn2.to_k.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_v\\.(.*)$': 'transformer_blocks.\\1.attn2.to_v.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_out\\.(.*)$': 'transformer_blocks.\\1.attn2.to_out.\\2', '^transformer_blocks\\.(\\d+)\\.mlp\\.(.*)$': 'transformer_blocks.\\1.mlp.\\2'})(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (SLIDING_TILE_ATTN, SAGE_ATTN, FLASH_ATTN, TORCH_SDPA, VIDEO_SPARSE_ATTN, VMOBA_ATTN, SAGE_ATTN_THREE), hidden_size: int = 0, num_attention_heads: int = 16, num_channels_latents: int = 0, in_channels: int = 16, out_channels: int = 16, exclude_lora_layers: list[str] = (lambda: ['embedder'])(), boundary_ratio: float | None = None, attention_head_dim: int = 128, num_layers: int = 28, mlp_ratio: float = 4.0, text_embed_dim: int = 1024, adaln_lora_dim: int = 256, use_adaln_lora: bool = True, max_size: tuple[int, int, int] = (128, 240, 240), patch_size: tuple[int, int, int] = (1, 2, 2), rope_scale: tuple[float, float, float] = (1.0, 3.0, 3.0), concat_padding_mask: bool = True, extra_pos_embed_type: str | None = None, use_crossattn_projection: bool = False, crossattn_proj_in_channels: int = 100352, rope_enable_fps_modulation: bool = True, qk_norm: str = 'rms_norm', eps: float = 1e-06)
Bases: DiTArchConfig
Configuration for Cosmos 2.5 architecture (MiniTrainDIT).
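Because the architecture config is a dataclass, it can be built with defaults and selectively overridden by keyword. A minimal sketch using field names from the signature above; whether a given override matches an available checkpoint is up to the caller:

```python
from fastvideo.configs.models.dits.cosmos2_5 import Cosmos25ArchConfig

# Defaults mirror the signature above: 28 blocks, 16 heads, head dim 128, etc.
arch = Cosmos25ArchConfig()

# Any field in the signature can be overridden by keyword.
custom_arch = Cosmos25ArchConfig(
    num_layers=16,           # fewer transformer blocks than the default 28
    num_attention_heads=8,   # fewer attention heads than the default 16
    text_embed_dim=1024,     # keep the default text-conditioning width
)
print(custom_arch.num_layers, custom_arch.attention_head_dim)
```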
fastvideo.configs.models.dits.cosmos2_5.Cosmos25VideoConfig
dataclass
Cosmos25VideoConfig(arch_config: DiTArchConfig = Cosmos25ArchConfig(), prefix: str = 'Cosmos25', quant_config: QuantizationConfig | None = None)
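The model-level config pairs an arch_config with a prefix and an optional quant_config. A minimal construction sketch (no checkpoint loading shown):

```python
from fastvideo.configs.models.dits.cosmos2_5 import (
    Cosmos25ArchConfig,
    Cosmos25VideoConfig,
)

# Default construction: prefix "Cosmos25", no quantization, default arch config.
config = Cosmos25VideoConfig()

# Or supply a customized architecture config.
config = Cosmos25VideoConfig(arch_config=Cosmos25ArchConfig(num_layers=16))
print(config.prefix, config.quant_config)
```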
fastvideo.configs.models.dits.longcat
LongCat Video DiT configuration for the native FastVideo implementation.
Classes
fastvideo.configs.models.dits.longcat.LongCatVideoArchConfig
dataclass
LongCatVideoArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), _fsdp_shard_conditions: list = (lambda: [is_longcat_blocks])(), _compile_conditions: list = (lambda: [is_longcat_blocks])(), param_names_mapping: dict = (lambda: {'^x_embedder\\.(.*)$': 'patch_embed.\\1', '^t_embedder\\.mlp\\.0\\.(.*)$': 'time_embedder.linear_1.\\1', '^t_embedder\\.mlp\\.2\\.(.*)$': 'time_embedder.linear_2.\\1', '^y_embedder\\.y_proj\\.0\\.(.*)$': 'caption_embedder.linear_1.\\1', '^y_embedder\\.y_proj\\.2\\.(.*)$': 'caption_embedder.linear_2.\\1', '^blocks\\.(\\d+)\\.adaLN_modulation\\.1\\.(.*)$': 'blocks.\\1.adaln_linear_1.\\2', '^blocks\\.(\\d+)\\.mod_norm_attn\\.(.*)$': 'blocks.\\1.norm_attn.\\2', '^blocks\\.(\\d+)\\.mod_norm_ffn\\.(.*)$': 'blocks.\\1.norm_ffn.\\2', '^blocks\\.(\\d+)\\.pre_crs_attn_norm\\.(.*)$': 'blocks.\\1.norm_cross.\\2', '^blocks\\.(\\d+)\\.attn\\.qkv\\.(.*)$': 'blocks.\\1.self_attn.qkv_fused.\\2', '^blocks\\.(\\d+)\\.attn\\.proj\\.(.*)$': 'blocks.\\1.self_attn.to_out.\\2', '^blocks\\.(\\d+)\\.attn\\.q_norm\\.(.*)$': 'blocks.\\1.self_attn.q_norm.\\2', '^blocks\\.(\\d+)\\.attn\\.k_norm\\.(.*)$': 'blocks.\\1.self_attn.k_norm.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.q_linear\\.(.*)$': 'blocks.\\1.cross_attn.to_q.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.kv_linear\\.(.*)$': 'blocks.\\1.cross_attn.kv_fused.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.proj\\.(.*)$': 'blocks.\\1.cross_attn.to_out.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.q_norm\\.(.*)$': 'blocks.\\1.cross_attn.q_norm.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.k_norm\\.(.*)$': 'blocks.\\1.cross_attn.k_norm.\\2', '^blocks\\.(\\d+)\\.ffn\\.w1\\.(.*)$': 'blocks.\\1.ffn.w1.\\2', '^blocks\\.(\\d+)\\.ffn\\.w2\\.(.*)$': 'blocks.\\1.ffn.w2.\\2', '^blocks\\.(\\d+)\\.ffn\\.w3\\.(.*)$': 'blocks.\\1.ffn.w3.\\2', '^final_layer\\.adaLN_modulation\\.1\\.(.*)$': 'final_layer.adaln_linear.\\1', '^final_layer\\.norm_final\\.(.*)$': 'final_layer.norm.\\1', '^final_layer\\.linear\\.(.*)$': 'final_layer.proj.\\1'})(), reverse_param_names_mapping: dict = (lambda: {})(), lora_param_names_mapping: dict = (lambda: {})(), _supported_attention_backends: tuple = (lambda: (FLASH_ATTN, TORCH_SDPA))(), hidden_size: int = 4096, num_attention_heads: int = 32, num_channels_latents: int = 16, in_channels: int = 16, out_channels: int = 16, exclude_lora_layers: list[str] = (lambda: [])(), boundary_ratio: float | None = None, depth: int = 48, attention_head_dim: int = 128, patch_size: tuple[int, int, int] = (1, 2, 2), caption_channels: int = 4096, adaln_tembed_dim: int = 512, frequency_embedding_size: int = 256, mlp_ratio: int = 4, text_tokens_zero_pad: bool = True, enable_bsa: bool = False, bsa_params: dict | None = (lambda: {'sparsity': 0.9375, 'cdf_threshold': None, 'chunk_3d_shape_q': [4, 4, 4], 'chunk_3d_shape_k': [4, 4, 4]})())
Bases: DiTArchConfig
Architecture configuration for the native LongCat Video DiT.
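A minimal construction sketch. The bsa_params keys are copied from the defaults in the signature above; "bsa" presumably refers to block-sparse attention, and the value ranges the kernel accepts are not documented here:

```python
from fastvideo.configs.models.dits.longcat import LongCatVideoArchConfig

# Defaults mirror the signature above: depth 48, 32 heads, head dim 128, BSA off.
arch = LongCatVideoArchConfig()

# Enable BSA and tune its parameters (keys copied from the default bsa_params).
sparse_arch = LongCatVideoArchConfig(
    enable_bsa=True,
    bsa_params={
        "sparsity": 0.9375,
        "cdf_threshold": None,
        "chunk_3d_shape_q": [4, 4, 4],
        "chunk_3d_shape_k": [4, 4, 4],
    },
)
print(sparse_arch.enable_bsa, sparse_arch.bsa_params["sparsity"])
```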
fastvideo.configs.models.dits.longcat.LongCatVideoConfig
dataclass
LongCatVideoConfig(arch_config: DiTArchConfig = LongCatVideoArchConfig(), prefix: str = 'longcat', quant_config: QuantizationConfig | None = None)
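As with the Cosmos config, construction is a plain dataclass call; a minimal sketch pairing it with a customized architecture config:

```python
from fastvideo.configs.models.dits.longcat import (
    LongCatVideoArchConfig,
    LongCatVideoConfig,
)

# Default construction: prefix "longcat", no quantization, default arch config.
config = LongCatVideoConfig()

# Swap in a customized architecture config.
config = LongCatVideoConfig(arch_config=LongCatVideoArchConfig(enable_bsa=True))
print(config.prefix, config.arch_config.enable_bsa)
```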