cosmos2_5

Classes

fastvideo.configs.models.dits.cosmos2_5.Cosmos25ArchConfig dataclass

Cosmos25ArchConfig(
    stacked_params_mapping: list[tuple[str, str, str]] = [],
    _fsdp_shard_conditions: list = [is_transformer_blocks],
    _compile_conditions: list = [],
    param_names_mapping: dict = {
        '^net\\.x_embedder\\.proj\\.1\\.(.*)$': 'patch_embed.proj.\\1',
        '^net\\.t_embedder\\.1\\.linear_1\\.(.*)$': 'time_embed.t_embedder.linear_1.\\1',
        '^net\\.t_embedder\\.1\\.linear_2\\.(.*)$': 'time_embed.t_embedder.linear_2.\\1',
        '^net\\.t_embedding_norm\\.(.*)$': 'time_embed.norm.\\1',
        '^net\\.crossattn_proj\\.0\\.weight$': 'crossattn_proj.0.weight',
        '^net\\.crossattn_proj\\.0\\.bias$': 'crossattn_proj.0.bias',
        '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_q.\\2',
        '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_k.\\2',
        '^net\\.blocks\\.(\\d+)\\.self_attn\\.v_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_v.\\2',
        '^net\\.blocks\\.(\\d+)\\.self_attn\\.output_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_out.\\2',
        '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_norm\\.weight$': 'transformer_blocks.\\1.attn1.norm_q.weight',
        '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_norm\\.weight$': 'transformer_blocks.\\1.attn1.norm_k.weight',
        '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_norm\\._extra_state$': 'transformer_blocks.\\1.attn1.norm_q._extra_state',
        '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_norm\\._extra_state$': 'transformer_blocks.\\1.attn1.norm_k._extra_state',
        '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_q.\\2',
        '^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_k.\\2',
        '^net\\.blocks\\.(\\d+)\\.cross_attn\\.v_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_v.\\2',
        '^net\\.blocks\\.(\\d+)\\.cross_attn\\.output_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_out.\\2',
        '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_norm\\.weight$': 'transformer_blocks.\\1.attn2.norm_q.weight',
        '^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_norm\\.weight$': 'transformer_blocks.\\1.attn2.norm_k.weight',
        '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_norm\\._extra_state$': 'transformer_blocks.\\1.attn2.norm_q._extra_state',
        '^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_norm\\._extra_state$': 'transformer_blocks.\\1.attn2.norm_k._extra_state',
        '^net\\.blocks\\.(\\d+)\\.mlp\\.layer1\\.(.*)$': 'transformer_blocks.\\1.mlp.fc_in.\\2',
        '^net\\.blocks\\.(\\d+)\\.mlp\\.layer2\\.(.*)$': 'transformer_blocks.\\1.mlp.fc_out.\\2',
        '^net\\.blocks\\.(\\d+)\\.adaln_modulation_self_attn\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_self_attn.1.\\2',
        '^net\\.blocks\\.(\\d+)\\.adaln_modulation_self_attn\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_self_attn.2.\\2',
        '^net\\.blocks\\.(\\d+)\\.adaln_modulation_cross_attn\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_cross_attn.1.\\2',
        '^net\\.blocks\\.(\\d+)\\.adaln_modulation_cross_attn\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_cross_attn.2.\\2',
        '^net\\.blocks\\.(\\d+)\\.adaln_modulation_mlp\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_mlp.1.\\2',
        '^net\\.blocks\\.(\\d+)\\.adaln_modulation_mlp\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_mlp.2.\\2',
        '^net\\.blocks\\.(\\d+)\\.layer_norm_self_attn\\._extra_state$': 'transformer_blocks.\\1.norm1.norm._extra_state',
        '^net\\.blocks\\.(\\d+)\\.layer_norm_cross_attn\\._extra_state$': 'transformer_blocks.\\1.norm2.norm._extra_state',
        '^net\\.blocks\\.(\\d+)\\.layer_norm_mlp\\._extra_state$': 'transformer_blocks.\\1.norm3.norm._extra_state',
        '^net\\.final_layer\\.linear\\.(.*)$': 'final_layer.proj_out.\\1',
        '^net\\.final_layer\\.adaln_modulation\\.1\\.(.*)$': 'final_layer.linear_1.\\1',
        '^net\\.final_layer\\.adaln_modulation\\.2\\.(.*)$': 'final_layer.linear_2.\\1',
    },
    reverse_param_names_mapping: dict = {},
    lora_param_names_mapping: dict = {
        '^transformer_blocks\\.(\\d+)\\.attn1\\.to_q\\.(.*)$': 'transformer_blocks.\\1.attn1.to_q.\\2',
        '^transformer_blocks\\.(\\d+)\\.attn1\\.to_k\\.(.*)$': 'transformer_blocks.\\1.attn1.to_k.\\2',
        '^transformer_blocks\\.(\\d+)\\.attn1\\.to_v\\.(.*)$': 'transformer_blocks.\\1.attn1.to_v.\\2',
        '^transformer_blocks\\.(\\d+)\\.attn1\\.to_out\\.(.*)$': 'transformer_blocks.\\1.attn1.to_out.\\2',
        '^transformer_blocks\\.(\\d+)\\.attn2\\.to_q\\.(.*)$': 'transformer_blocks.\\1.attn2.to_q.\\2',
        '^transformer_blocks\\.(\\d+)\\.attn2\\.to_k\\.(.*)$': 'transformer_blocks.\\1.attn2.to_k.\\2',
        '^transformer_blocks\\.(\\d+)\\.attn2\\.to_v\\.(.*)$': 'transformer_blocks.\\1.attn2.to_v.\\2',
        '^transformer_blocks\\.(\\d+)\\.attn2\\.to_out\\.(.*)$': 'transformer_blocks.\\1.attn2.to_out.\\2',
        '^transformer_blocks\\.(\\d+)\\.mlp\\.(.*)$': 'transformer_blocks.\\1.mlp.\\2',
    },
    _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (
        SLIDING_TILE_ATTN, SAGE_ATTN, FLASH_ATTN, TORCH_SDPA,
        VIDEO_SPARSE_ATTN, VMOBA_ATTN, SAGE_ATTN_THREE,
    ),
    hidden_size: int = 0,
    num_attention_heads: int = 16,
    num_channels_latents: int = 0,
    exclude_lora_layers: list[str] = ['embedder'],
    boundary_ratio: float | None = None,
    in_channels: int = 16,
    out_channels: int = 16,
    attention_head_dim: int = 128,
    num_layers: int = 28,
    mlp_ratio: float = 4.0,
    text_embed_dim: int = 1024,
    adaln_lora_dim: int = 256,
    use_adaln_lora: bool = True,
    max_size: tuple[int, int, int] = (128, 240, 240),
    patch_size: tuple[int, int, int] = (1, 2, 2),
    rope_scale: tuple[float, float, float] = (1.0, 3.0, 3.0),
    concat_padding_mask: bool = True,
    extra_pos_embed_type: str | None = None,
    use_crossattn_projection: bool = False,
    crossattn_proj_in_channels: int = 100352,
    rope_enable_fps_modulation: bool = True,
    qk_norm: str = 'rms_norm',
    eps: float = 1e-06,
)

Bases: DiTArchConfig

Configuration for the Cosmos 2.5 architecture (MiniTrainDIT).
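
The param_names_mapping above translates parameter names from the original Cosmos checkpoint (the net.* namespace) into FastVideo module names via regex substitution. A minimal standalone sketch of that translation, assuming a first-match-wins loop over the patterns; the remap helper and the two-pattern subset below are illustrative, not library API:

    import re

    # Two entries copied from param_names_mapping above; the full dict
    # follows the same checkpoint-name -> FastVideo-name scheme.
    PATTERNS = {
        r'^net\.blocks\.(\d+)\.self_attn\.q_proj\.(.*)$':
            r'transformer_blocks.\1.attn1.to_q.\2',
        r'^net\.final_layer\.linear\.(.*)$':
            r'final_layer.proj_out.\1',
    }

    def remap(name: str) -> str:
        """Map a checkpoint parameter name to its FastVideo name."""
        for pattern, repl in PATTERNS.items():
            new_name, n_subs = re.subn(pattern, repl, name)
            if n_subs:  # first matching pattern wins
                return new_name
        return name  # unmapped names pass through unchanged

    print(remap('net.blocks.3.self_attn.q_proj.weight'))
    # transformer_blocks.3.attn1.to_q.weight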

fastvideo.configs.models.dits.cosmos2_5.Cosmos25VideoConfig dataclass

Cosmos25VideoConfig(
    arch_config: DiTArchConfig = Cosmos25ArchConfig(),
    prefix: str = 'Cosmos25',
    quant_config: QuantizationConfig | None = None,
)

Bases: DiTConfig

Configuration for the Cosmos 2.5 video generation model.
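
A minimal usage sketch, assuming standard dataclass keyword overrides; the field values chosen here are illustrative:

    from fastvideo.configs.models.dits.cosmos2_5 import (
        Cosmos25ArchConfig,
        Cosmos25VideoConfig,
    )

    # Shrink the transformer for a quick smoke test; every other field
    # keeps the default shown in the signatures above.
    arch = Cosmos25ArchConfig(num_layers=4)
    config = Cosmos25VideoConfig(arch_config=arch)

    assert config.prefix == 'Cosmos25'
    # 16 heads x 128 dims per head -> a 2048-wide attention inner dim
    assert arch.num_attention_heads * arch.attention_head_dim == 2048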