models
¶
Classes¶
fastvideo.configs.models.DiTConfig
dataclass
¶
DiTConfig(arch_config: DiTArchConfig = DiTArchConfig(), prefix: str = '', quant_config: QuantizationConfig | None = None)
Bases: ModelConfig
Functions¶
fastvideo.configs.models.DiTConfig.add_cli_args
staticmethod
¶
Add CLI arguments for DiTConfig fields
Source code in fastvideo/configs/models/dits/base.py
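A minimal usage sketch, assuming `add_cli_args` accepts an argparse-style parser and registers one flag per `DiTConfig` field; the exact signature (for example an optional prefix argument) is not shown in this reference:

```python
# Hedged sketch: register DiTConfig's fields as CLI flags, then parse.
# The precise flag names and any prefix handling come from the source in
# fastvideo/configs/models/dits/base.py and are assumed here.
import argparse

from fastvideo.configs.models import DiTConfig

parser = argparse.ArgumentParser()
DiTConfig.add_cli_args(parser)   # adds one argument per DiTConfig field
args = parser.parse_args()       # parsed values can then seed a DiTConfig
```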
fastvideo.configs.models.VAEConfig
dataclass
¶
VAEConfig(arch_config: VAEArchConfig = VAEArchConfig(), load_encoder: bool = True, load_decoder: bool = True, tile_sample_min_height: int = 256, tile_sample_min_width: int = 256, tile_sample_min_num_frames: int = 16, tile_sample_stride_height: int = 192, tile_sample_stride_width: int = 192, tile_sample_stride_num_frames: int = 12, blend_num_frames: int = 0, use_tiling: bool = True, use_temporal_tiling: bool = True, use_parallel_tiling: bool = True, use_temporal_scaling_frames: bool = True)
Bases: ModelConfig
Functions¶
fastvideo.configs.models.VAEConfig.add_cli_args
staticmethod
¶
Add CLI arguments for VAEConfig fields
Source code in fastvideo/configs/models/vaes/base.py
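The tiling defaults above imply overlap between adjacent tiles: each stride is smaller than the corresponding minimum tile size, so neighboring tiles share a band of samples that a tiled decoder can blend. A quick check of the arithmetic encoded in the defaults:

```python
# Overlap implied by the default VAEConfig tiling values.
from fastvideo.configs.models import VAEConfig

cfg = VAEConfig()
overlap_h = cfg.tile_sample_min_height - cfg.tile_sample_stride_height          # 256 - 192 = 64 px
overlap_w = cfg.tile_sample_min_width - cfg.tile_sample_stride_width            # 256 - 192 = 64 px
overlap_t = cfg.tile_sample_min_num_frames - cfg.tile_sample_stride_num_frames  # 16 - 12 = 4 frames
```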
Modules¶
fastvideo.configs.models.dits
¶
Classes¶
fastvideo.configs.models.dits.Cosmos25VideoConfig
dataclass
¶
Cosmos25VideoConfig(arch_config: DiTArchConfig = Cosmos25ArchConfig(), prefix: str = 'Cosmos25', quant_config: QuantizationConfig | None = None)
fastvideo.configs.models.dits.LongCatVideoConfig
dataclass
¶
LongCatVideoConfig(arch_config: DiTArchConfig = LongCatVideoArchConfig(), prefix: str = 'longcat', quant_config: QuantizationConfig | None = None)
Modules¶
fastvideo.configs.models.dits.base
¶
Classes¶
fastvideo.configs.models.dits.base.DiTConfig
dataclass
¶DiTConfig(arch_config: DiTArchConfig = DiTArchConfig(), prefix: str = '', quant_config: QuantizationConfig | None = None)
Bases: ModelConfig
fastvideo.configs.models.dits.base.DiTConfig.add_cli_args
staticmethod
¶Add CLI arguments for DiTConfig fields
Source code in fastvideo/configs/models/dits/base.py
fastvideo.configs.models.dits.cosmos2_5
¶
Classes¶
fastvideo.configs.models.dits.cosmos2_5.Cosmos25ArchConfig
dataclass
¶Cosmos25ArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), _fsdp_shard_conditions: list = (lambda: [is_transformer_blocks])(), _compile_conditions: list = list(), param_names_mapping: dict = (lambda: {'^net\\.x_embedder\\.proj\\.1\\.(.*)$': 'patch_embed.proj.\\1', '^net\\.t_embedder\\.1\\.linear_1\\.(.*)$': 'time_embed.t_embedder.linear_1.\\1', '^net\\.t_embedder\\.1\\.linear_2\\.(.*)$': 'time_embed.t_embedder.linear_2.\\1', '^net\\.t_embedding_norm\\.(.*)$': 'time_embed.norm.\\1', '^net\\.crossattn_proj\\.0\\.weight$': 'crossattn_proj.0.weight', '^net\\.crossattn_proj\\.0\\.bias$': 'crossattn_proj.0.bias', '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_q.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_k.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.v_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_v.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.output_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_out.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_norm\\.weight$': 'transformer_blocks.\\1.attn1.norm_q.weight', '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_norm\\.weight$': 'transformer_blocks.\\1.attn1.norm_k.weight', '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_norm\\._extra_state$': 'transformer_blocks.\\1.attn1.norm_q._extra_state', '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_norm\\._extra_state$': 'transformer_blocks.\\1.attn1.norm_k._extra_state', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_q.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_k.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.v_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_v.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.output_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_out.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_norm\\.weight$': 'transformer_blocks.\\1.attn2.norm_q.weight', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_norm\\.weight$': 'transformer_blocks.\\1.attn2.norm_k.weight', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_norm\\._extra_state$': 'transformer_blocks.\\1.attn2.norm_q._extra_state', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_norm\\._extra_state$': 'transformer_blocks.\\1.attn2.norm_k._extra_state', '^net\\.blocks\\.(\\d+)\\.mlp\\.layer1\\.(.*)$': 'transformer_blocks.\\1.mlp.fc_in.\\2', '^net\\.blocks\\.(\\d+)\\.mlp\\.layer2\\.(.*)$': 'transformer_blocks.\\1.mlp.fc_out.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_self_attn\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_self_attn.1.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_self_attn\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_self_attn.2.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_cross_attn\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_cross_attn.1.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_cross_attn\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_cross_attn.2.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_mlp\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_mlp.1.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_mlp\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_mlp.2.\\2', '^net\\.blocks\\.(\\d+)\\.layer_norm_self_attn\\._extra_state$': 'transformer_blocks.\\1.norm1.norm._extra_state', '^net\\.blocks\\.(\\d+)\\.layer_norm_cross_attn\\._extra_state$': 'transformer_blocks.\\1.norm2.norm._extra_state', '^net\\.blocks\\.(\\d+)\\.layer_norm_mlp\\._extra_state$': 'transformer_blocks.\\1.norm3.norm._extra_state', 
'^net\\.final_layer\\.linear\\.(.*)$': 'final_layer.proj_out.\\1', '^net\\.final_layer\\.adaln_modulation\\.1\\.(.*)$': 'final_layer.linear_1.\\1', '^net\\.final_layer\\.adaln_modulation\\.2\\.(.*)$': 'final_layer.linear_2.\\1'})(), reverse_param_names_mapping: dict = dict(), lora_param_names_mapping: dict = (lambda: {'^transformer_blocks\\.(\\d+)\\.attn1\\.to_q\\.(.*)$': 'transformer_blocks.\\1.attn1.to_q.\\2', '^transformer_blocks\\.(\\d+)\\.attn1\\.to_k\\.(.*)$': 'transformer_blocks.\\1.attn1.to_k.\\2', '^transformer_blocks\\.(\\d+)\\.attn1\\.to_v\\.(.*)$': 'transformer_blocks.\\1.attn1.to_v.\\2', '^transformer_blocks\\.(\\d+)\\.attn1\\.to_out\\.(.*)$': 'transformer_blocks.\\1.attn1.to_out.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_q\\.(.*)$': 'transformer_blocks.\\1.attn2.to_q.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_k\\.(.*)$': 'transformer_blocks.\\1.attn2.to_k.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_v\\.(.*)$': 'transformer_blocks.\\1.attn2.to_v.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_out\\.(.*)$': 'transformer_blocks.\\1.attn2.to_out.\\2', '^transformer_blocks\\.(\\d+)\\.mlp\\.(.*)$': 'transformer_blocks.\\1.mlp.\\2'})(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (SLIDING_TILE_ATTN, SAGE_ATTN, FLASH_ATTN, TORCH_SDPA, VIDEO_SPARSE_ATTN, VMOBA_ATTN, SAGE_ATTN_THREE, SLA_ATTN, SAGE_SLA_ATTN), hidden_size: int = 0, num_attention_heads: int = 16, num_channels_latents: int = 0, in_channels: int = 16, out_channels: int = 16, exclude_lora_layers: list[str] = (lambda: ['embedder'])(), boundary_ratio: float | None = None, attention_head_dim: int = 128, num_layers: int = 28, mlp_ratio: float = 4.0, text_embed_dim: int = 1024, adaln_lora_dim: int = 256, use_adaln_lora: bool = True, max_size: tuple[int, int, int] = (128, 240, 240), patch_size: tuple[int, int, int] = (1, 2, 2), rope_scale: tuple[float, float, float] = (1.0, 3.0, 3.0), concat_padding_mask: bool = True, extra_pos_embed_type: str | None = None, use_crossattn_projection: bool = False, crossattn_proj_in_channels: int = 100352, rope_enable_fps_modulation: bool = True, qk_norm: str = 'rms_norm', eps: float = 1e-06)
Bases: DiTArchConfig
Configuration for Cosmos 2.5 architecture (MiniTrainDIT).
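`param_names_mapping` pairs a regex over official `net.*` checkpoint keys with an `re.sub`-style replacement template targeting FastVideo module names. A sketch of how such a mapping can be applied; the real loader may differ, but the mapping's shape (pattern string to replacement template with backreferences) supports exactly this:

```python
# Hedged sketch: translate official checkpoint keys with a regex mapping.
import re

def remap_keys(state_dict: dict, mapping: dict[str, str]) -> dict:
    out = {}
    for key, value in state_dict.items():
        for pattern, repl in mapping.items():
            if re.match(pattern, key):
                key = re.sub(pattern, repl, key)
                break
        out[key] = value
    return out

# e.g. 'net.blocks.3.self_attn.q_proj.weight'
#   -> 'transformer_blocks.3.attn1.to_q.weight'
```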
fastvideo.configs.models.dits.cosmos2_5.Cosmos25VideoConfig
dataclass
¶Cosmos25VideoConfig(arch_config: DiTArchConfig = Cosmos25ArchConfig(), prefix: str = 'Cosmos25', quant_config: QuantizationConfig | None = None)
fastvideo.configs.models.dits.longcat
¶
LongCat Video DiT configuration for the native FastVideo implementation.
Classes¶
fastvideo.configs.models.dits.longcat.LongCatVideoArchConfig
dataclass
¶LongCatVideoArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), _fsdp_shard_conditions: list = (lambda: [is_longcat_blocks])(), _compile_conditions: list = (lambda: [is_longcat_blocks])(), param_names_mapping: dict = (lambda: {'^x_embedder\\.(.*)$': 'patch_embed.\\1', '^t_embedder\\.mlp\\.0\\.(.*)$': 'time_embedder.linear_1.\\1', '^t_embedder\\.mlp\\.2\\.(.*)$': 'time_embedder.linear_2.\\1', '^y_embedder\\.y_proj\\.0\\.(.*)$': 'caption_embedder.linear_1.\\1', '^y_embedder\\.y_proj\\.2\\.(.*)$': 'caption_embedder.linear_2.\\1', '^blocks\\.(\\d+)\\.adaLN_modulation\\.1\\.(.*)$': 'blocks.\\1.adaln_linear_1.\\2', '^blocks\\.(\\d+)\\.mod_norm_attn\\.(.*)$': 'blocks.\\1.norm_attn.\\2', '^blocks\\.(\\d+)\\.mod_norm_ffn\\.(.*)$': 'blocks.\\1.norm_ffn.\\2', '^blocks\\.(\\d+)\\.pre_crs_attn_norm\\.(.*)$': 'blocks.\\1.norm_cross.\\2', '^blocks\\.(\\d+)\\.attn\\.qkv\\.(.*)$': 'blocks.\\1.self_attn.qkv_fused.\\2', '^blocks\\.(\\d+)\\.attn\\.proj\\.(.*)$': 'blocks.\\1.self_attn.to_out.\\2', '^blocks\\.(\\d+)\\.attn\\.q_norm\\.(.*)$': 'blocks.\\1.self_attn.q_norm.\\2', '^blocks\\.(\\d+)\\.attn\\.k_norm\\.(.*)$': 'blocks.\\1.self_attn.k_norm.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.q_linear\\.(.*)$': 'blocks.\\1.cross_attn.to_q.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.kv_linear\\.(.*)$': 'blocks.\\1.cross_attn.kv_fused.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.proj\\.(.*)$': 'blocks.\\1.cross_attn.to_out.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.q_norm\\.(.*)$': 'blocks.\\1.cross_attn.q_norm.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.k_norm\\.(.*)$': 'blocks.\\1.cross_attn.k_norm.\\2', '^blocks\\.(\\d+)\\.ffn\\.w1\\.(.*)$': 'blocks.\\1.ffn.w1.\\2', '^blocks\\.(\\d+)\\.ffn\\.w2\\.(.*)$': 'blocks.\\1.ffn.w2.\\2', '^blocks\\.(\\d+)\\.ffn\\.w3\\.(.*)$': 'blocks.\\1.ffn.w3.\\2', '^final_layer\\.adaLN_modulation\\.1\\.(.*)$': 'final_layer.adaln_linear.\\1', '^final_layer\\.norm_final\\.(.*)$': 'final_layer.norm.\\1', '^final_layer\\.linear\\.(.*)$': 'final_layer.proj.\\1'})(), reverse_param_names_mapping: dict = (lambda: {})(), lora_param_names_mapping: dict = (lambda: {})(), _supported_attention_backends: tuple = (lambda: (FLASH_ATTN, TORCH_SDPA))(), hidden_size: int = 4096, num_attention_heads: int = 32, num_channels_latents: int = 16, in_channels: int = 16, out_channels: int = 16, exclude_lora_layers: list[str] = (lambda: [])(), boundary_ratio: float | None = None, depth: int = 48, attention_head_dim: int = 128, patch_size: tuple[int, int, int] = (1, 2, 2), caption_channels: int = 4096, adaln_tembed_dim: int = 512, frequency_embedding_size: int = 256, mlp_ratio: int = 4, text_tokens_zero_pad: bool = True, enable_bsa: bool = False, bsa_params: dict | None = (lambda: {'sparsity': 0.9375, 'cdf_threshold': None, 'chunk_3d_shape_q': [4, 4, 4], 'chunk_3d_shape_k': [4, 4, 4]})())
Bases: DiTArchConfig
Architecture configuration for the native LongCat Video DiT.
fastvideo.configs.models.dits.longcat.LongCatVideoConfig
dataclass
¶LongCatVideoConfig(arch_config: DiTArchConfig = LongCatVideoArchConfig(), prefix: str = 'longcat', quant_config: QuantizationConfig | None = None)
Functions¶
fastvideo.configs.models.dits.longcat.is_longcat_blocks
¶
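No docstring is attached here; given its use in `_fsdp_shard_conditions` and `_compile_conditions` above, it is presumably a predicate that selects the transformer blocks for FSDP wrapping and compilation. A purely hypothetical sketch of that shape (the real signature and matching rule live in the source):

```python
# Hypothetical sketch only: a shard/compile predicate matching the
# 'blocks.<i>' submodules named in LongCatVideoArchConfig's mappings.
import re

def is_longcat_blocks(module, name: str) -> bool:
    return re.fullmatch(r"blocks\.\d+", name) is not None
```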
fastvideo.configs.models.encoders
¶
Classes¶
fastvideo.configs.models.encoders.Reason1ArchConfig
dataclass
¶
Reason1ArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), architectures: list[str] = (lambda: ['Qwen2_5_VLForConditionalGeneration'])(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (FLASH_ATTN, TORCH_SDPA), output_hidden_states: bool = True, use_return_dict: bool = True, vocab_size: int = 152064, hidden_size: int = 3584, num_hidden_layers: int = 28, num_attention_heads: int = 28, pad_token_id: int = 151643, eos_token_id: int = 151645, text_len: int = 512, hidden_state_skip_layer: int = 0, decoder_start_token_id: int = 0, output_past: bool = True, scalable_attention: bool = True, tie_word_embeddings: bool = False, tokenizer_kwargs: dict[str, Any] = dict(), _fsdp_shard_conditions: list = (lambda: [])(), model_type: str = 'qwen2_5_vl', num_key_value_heads: int = 4, intermediate_size: int = 18944, bos_token_id: int = 151643, image_token_id: int = 151655, video_token_id: int = 151656, vision_token_id: int = 151654, vision_start_token_id: int = 151652, vision_end_token_id: int = 151653, vision_config: dict[str, Any] | None = None, rope_theta: float = 1000000.0, rope_scaling: dict[str, Any] | None = (lambda: {'type': 'mrope', 'mrope_section': [16, 24, 24]})(), max_position_embeddings: int = 128000, max_window_layers: int = 28, embedding_concat_strategy: str = 'mean_pooling', n_layers_per_group: int = 5, num_embedding_padding_tokens: int = 512, attention_dropout: float = 0.0, hidden_act: str = 'silu', initializer_range: float = 0.02, rms_norm_eps: float = 1e-06, use_sliding_window: bool = False, sliding_window: int = 32768, use_cache: bool = False, torch_dtype: str = 'bfloat16', _attn_implementation: str = 'flash_attention_2')
Bases: TextEncoderArchConfig
Architecture settings (defaults match Qwen2.5-VL-7B-Instruct).
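The defaults are internally consistent with Qwen2.5-VL-7B: `hidden_size / num_attention_heads` gives a head dimension of 128, and the mRoPE sections (temporal, height, width) sum to half of it, since mRoPE partitions the rotary frequency pairs:

```python
# Sanity check of the Reason1ArchConfig defaults above.
hidden_size = 3584
num_attention_heads = 28
head_dim = hidden_size // num_attention_heads   # 128
mrope_section = [16, 24, 24]                    # temporal, height, width
assert sum(mrope_section) == head_dim // 2      # 64 == 64
```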
fastvideo.configs.models.encoders.Reason1Config
dataclass
¶
Reason1Config(arch_config: Reason1ArchConfig = Reason1ArchConfig(), prefix: str = '', quant_config: QuantizationConfig | None = None, lora_config: Any | None = None, is_chat_model: bool = False, tokenizer_type: str = 'Qwen/Qwen2.5-VL-7B-Instruct')
Bases: TextEncoderConfig
Reason1 text encoder config.
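Since these configs are dataclasses, defaults can be overridden field by field at construction, with architecture settings nested under `arch_config`:

```python
from fastvideo.configs.models.encoders import Reason1Config

cfg = Reason1Config(is_chat_model=True)  # override one top-level field
print(cfg.tokenizer_type)                # 'Qwen/Qwen2.5-VL-7B-Instruct'
print(cfg.arch_config.text_len)          # 512, from the nested Reason1ArchConfig
```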
fastvideo.configs.models.encoders.T5LargeConfig
dataclass
¶
T5LargeConfig(arch_config: TextEncoderArchConfig = T5LargeArchConfig(), prefix: str = 't5', quant_config: QuantizationConfig | None = None, lora_config: Any | None = None, is_chat_model: bool = False)
Bases: TextEncoderConfig
T5 Large configuration for this specific model variant.
Modules¶
fastvideo.configs.models.encoders.reason1
¶
Config for Reason1 (Qwen2.5-VL) text encoder.
Classes¶
fastvideo.configs.models.encoders.reason1.Reason1ArchConfig
dataclass
¶Reason1ArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), architectures: list[str] = (lambda: ['Qwen2_5_VLForConditionalGeneration'])(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (FLASH_ATTN, TORCH_SDPA), output_hidden_states: bool = True, use_return_dict: bool = True, vocab_size: int = 152064, hidden_size: int = 3584, num_hidden_layers: int = 28, num_attention_heads: int = 28, pad_token_id: int = 151643, eos_token_id: int = 151645, text_len: int = 512, hidden_state_skip_layer: int = 0, decoder_start_token_id: int = 0, output_past: bool = True, scalable_attention: bool = True, tie_word_embeddings: bool = False, tokenizer_kwargs: dict[str, Any] = dict(), _fsdp_shard_conditions: list = (lambda: [])(), model_type: str = 'qwen2_5_vl', num_key_value_heads: int = 4, intermediate_size: int = 18944, bos_token_id: int = 151643, image_token_id: int = 151655, video_token_id: int = 151656, vision_token_id: int = 151654, vision_start_token_id: int = 151652, vision_end_token_id: int = 151653, vision_config: dict[str, Any] | None = None, rope_theta: float = 1000000.0, rope_scaling: dict[str, Any] | None = (lambda: {'type': 'mrope', 'mrope_section': [16, 24, 24]})(), max_position_embeddings: int = 128000, max_window_layers: int = 28, embedding_concat_strategy: str = 'mean_pooling', n_layers_per_group: int = 5, num_embedding_padding_tokens: int = 512, attention_dropout: float = 0.0, hidden_act: str = 'silu', initializer_range: float = 0.02, rms_norm_eps: float = 1e-06, use_sliding_window: bool = False, sliding_window: int = 32768, use_cache: bool = False, torch_dtype: str = 'bfloat16', _attn_implementation: str = 'flash_attention_2')
Bases: TextEncoderArchConfig
Architecture settings (defaults match Qwen2.5-VL-7B-Instruct).
fastvideo.configs.models.encoders.reason1.Reason1Config
dataclass
¶Reason1Config(arch_config: Reason1ArchConfig = Reason1ArchConfig(), prefix: str = '', quant_config: QuantizationConfig | None = None, lora_config: Any | None = None, is_chat_model: bool = False, tokenizer_type: str = 'Qwen/Qwen2.5-VL-7B-Instruct')
Bases: TextEncoderConfig
Reason1 text encoder config.
fastvideo.configs.models.encoders.t5
¶
Classes¶
fastvideo.configs.models.encoders.t5.T5LargeArchConfig
dataclass
¶T5LargeArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = (lambda: [('.qkv_proj', '.q', 'q'), ('.qkv_proj', '.k', 'k'), ('.qkv_proj', '.v', 'v')])(), architectures: list[str] = (lambda: [])(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (FLASH_ATTN, TORCH_SDPA), output_hidden_states: bool = False, use_return_dict: bool = True, vocab_size: int = 32128, hidden_size: int = 0, num_hidden_layers: int = 0, num_attention_heads: int = 0, pad_token_id: int = 0, eos_token_id: int = 1, text_len: int = 512, hidden_state_skip_layer: int = 0, decoder_start_token_id: int = 0, output_past: bool = True, scalable_attention: bool = True, tie_word_embeddings: bool = False, tokenizer_kwargs: dict[str, Any] = dict(), _fsdp_shard_conditions: list = (lambda: [_is_transformer_layer, _is_embeddings, _is_final_layernorm])(), d_model: int = 1024, d_kv: int = 128, d_ff: int = 65536, num_layers: int = 24, num_decoder_layers: int | None = 24, num_heads: int = 128, relative_attention_num_buckets: int = 32, relative_attention_max_distance: int = 128, dropout_rate: float = 0.1, layer_norm_epsilon: float = 1e-06, initializer_factor: float = 1.0, feed_forward_proj: str = 'relu', dense_act_fn: str = '', is_gated_act: bool = False, is_encoder_decoder: bool = True, use_cache: bool = True, classifier_dropout: float = 0.0, dtype: str | None = None, gradient_checkpointing: bool = False, n_positions: int = 512, task_specific_params: dict | None = None)
Bases: T5ArchConfig
T5 Large architecture config with parameters for this specific model variant.
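The `stacked_params_mapping` triples `(fused_fragment, source_fragment, shard_id)` describe how separate `q`/`k`/`v` checkpoint weights are routed into one fused `qkv_proj` parameter. A sketch of the renaming step those triples encode (the actual shard-copying logic lives in FastVideo's weight loader):

```python
# Hedged sketch: route a checkpoint key to a shard of a fused parameter.
stacked_params_mapping = [
    (".qkv_proj", ".q", "q"),
    (".qkv_proj", ".k", "k"),
    (".qkv_proj", ".v", "v"),
]

def route(ckpt_key: str) -> tuple[str, str | None]:
    for fused, source, shard_id in stacked_params_mapping:
        if source in ckpt_key:
            return ckpt_key.replace(source, fused), shard_id
    return ckpt_key, None

# e.g. a T5-style key 'encoder.block.0.layer.0.SelfAttention.q.weight'
#   -> ('encoder.block.0.layer.0.SelfAttention.qkv_proj.weight', 'q')
```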
fastvideo.configs.models.encoders.t5.T5LargeConfig
dataclass
¶T5LargeConfig(arch_config: TextEncoderArchConfig = T5LargeArchConfig(), prefix: str = 't5', quant_config: QuantizationConfig | None = None, lora_config: Any | None = None, is_chat_model: bool = False)
Bases: TextEncoderConfig
T5 Large configuration for this specific model variant.
fastvideo.configs.models.vaes
¶
Classes¶
fastvideo.configs.models.vaes.Cosmos25VAEConfig
dataclass
¶
Cosmos25VAEConfig(arch_config: Cosmos25VAEArchConfig = Cosmos25VAEArchConfig(), load_encoder: bool = True, load_decoder: bool = True, tile_sample_min_height: int = 256, tile_sample_min_width: int = 256, tile_sample_min_num_frames: int = 16, tile_sample_stride_height: int = 192, tile_sample_stride_width: int = 192, tile_sample_stride_num_frames: int = 12, blend_num_frames: int = 0, use_tiling: bool = False, use_temporal_tiling: bool = False, use_parallel_tiling: bool = False, use_temporal_scaling_frames: bool = True, use_feature_cache: bool = True)
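Note that, unlike the base `VAEConfig`, `Cosmos25VAEConfig` ships with all tiling flags disabled and adds a `use_feature_cache` field; re-enabling tiling is an ordinary dataclass override:

```python
from fastvideo.configs.models.vaes import Cosmos25VAEConfig

cfg = Cosmos25VAEConfig()
assert cfg.use_tiling is False            # off by default here (True in base VAEConfig)
cfg = Cosmos25VAEConfig(use_tiling=True)  # opt back in explicitly
```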
Modules¶
fastvideo.configs.models.vaes.base
¶
Classes¶
fastvideo.configs.models.vaes.base.VAEConfig
dataclass
¶VAEConfig(arch_config: VAEArchConfig = VAEArchConfig(), load_encoder: bool = True, load_decoder: bool = True, tile_sample_min_height: int = 256, tile_sample_min_width: int = 256, tile_sample_min_num_frames: int = 16, tile_sample_stride_height: int = 192, tile_sample_stride_width: int = 192, tile_sample_stride_num_frames: int = 12, blend_num_frames: int = 0, use_tiling: bool = True, use_temporal_tiling: bool = True, use_parallel_tiling: bool = True, use_temporal_scaling_frames: bool = True)
Bases: ModelConfig
fastvideo.configs.models.vaes.base.VAEConfig.add_cli_args
staticmethod
¶Add CLI arguments for VAEConfig fields
Source code in fastvideo/configs/models/vaes/base.py
fastvideo.configs.models.vaes.cosmos2_5vae
¶
Cosmos 2.5 (Wan2.1-style) VAE config and checkpoint-key mapping.
Classes¶
fastvideo.configs.models.vaes.cosmos2_5vae.Cosmos25VAEArchConfig
dataclass
¶Cosmos25VAEArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), scaling_factor: float | Tensor = 0, temporal_compression_ratio: int = 4, spatial_compression_ratio: int = 8, _name_or_path: str = '', base_dim: int = 96, decoder_base_dim: int | None = None, z_dim: int = 16, dim_mult: tuple[int, ...] = (1, 2, 4, 4), num_res_blocks: int = 2, attn_scales: tuple[float, ...] = (), temperal_downsample: tuple[bool, ...] = (False, True, True), dropout: float = 0.0, is_residual: bool = False, in_channels: int = 3, out_channels: int = 3, patch_size: int | None = None, scale_factor_temporal: int = 4, scale_factor_spatial: int = 8, clip_output: bool = True, latents_mean: tuple[float, ...] = (-0.7571, -0.7089, -0.9113, 0.1075, -0.1745, 0.9653, -0.1517, 1.5508, 0.4134, -0.0715, 0.5517, -0.3632, -0.1922, -0.9497, 0.2503, -0.2921), latents_std: tuple[float, ...] = (2.8184, 1.4541, 2.3275, 2.6558, 1.2196, 1.7708, 2.6052, 2.0743, 3.2687, 2.1526, 2.8652, 1.5579, 1.6382, 1.1253, 2.8251, 1.916), param_names_mapping: dict[str, str] = (lambda: {'^conv1\\.(.*)$': 'quant_conv.\\1', '^conv2\\.(.*)$': 'post_quant_conv.\\1', '^encoder\\.conv1\\.(.*)$': 'encoder.conv_in.\\1', '^decoder\\.conv1\\.(.*)$': 'decoder.conv_in.\\1', '^encoder\\.head\\.0\\.gamma$': 'encoder.norm_out.gamma', '^encoder\\.head\\.2\\.(.*)$': 'encoder.conv_out.\\1', '^decoder\\.head\\.0\\.gamma$': 'decoder.norm_out.gamma', '^decoder\\.head\\.2\\.(.*)$': 'decoder.conv_out.\\1'})())
Bases: VAEArchConfig
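The 16-entry `latents_mean`/`latents_std` tuples match `z_dim = 16` and support Wan2.1-style per-channel latent normalization. A sketch of how such statistics are typically applied (FastVideo's pipeline may fold them together with `scaling_factor` differently):

```python
# Hedged sketch: per-channel normalization of raw VAE latents
# using the statistics above.
import torch

from fastvideo.configs.models.vaes.cosmos2_5vae import Cosmos25VAEArchConfig

arch = Cosmos25VAEArchConfig()
mean = torch.tensor(arch.latents_mean).view(1, -1, 1, 1, 1)  # (1, 16, 1, 1, 1)
std = torch.tensor(arch.latents_std).view(1, -1, 1, 1, 1)

def normalize(z: torch.Tensor) -> torch.Tensor:
    # z: (batch, 16, frames, height, width) raw latents
    return (z - mean) / std
```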
fastvideo.configs.models.vaes.cosmos2_5vae.Cosmos25VAEArchConfig.map_official_key
staticmethod
¶Map a single official checkpoint key into FastVideo key space.
Source code in fastvideo/configs/models/vaes/cosmos2_5vae.py
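Assuming from the docstring that `map_official_key` maps one key string at a time, remapping a whole official checkpoint reduces to a dict comprehension:

```python
# Hedged sketch: 'official_sd' is a hypothetical, already-loaded official
# state dict; map_official_key's exact signature (str -> str) is assumed.
from fastvideo.configs.models.vaes.cosmos2_5vae import Cosmos25VAEArchConfig

def remap_state_dict(official_sd: dict) -> dict:
    # e.g. 'encoder.head.0.gamma' -> 'encoder.norm_out.gamma'
    return {Cosmos25VAEArchConfig.map_official_key(k): v
            for k, v in official_sd.items()}
```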
fastvideo.configs.models.vaes.cosmos2_5vae.Cosmos25VAEConfig
dataclass
¶Cosmos25VAEConfig(arch_config: Cosmos25VAEArchConfig = Cosmos25VAEArchConfig(), load_encoder: bool = True, load_decoder: bool = True, tile_sample_min_height: int = 256, tile_sample_min_width: int = 256, tile_sample_min_num_frames: int = 16, tile_sample_stride_height: int = 192, tile_sample_stride_width: int = 192, tile_sample_stride_num_frames: int = 12, blend_num_frames: int = 0, use_tiling: bool = False, use_temporal_tiling: bool = False, use_parallel_tiling: bool = False, use_temporal_scaling_frames: bool = True, use_feature_cache: bool = True)