configs

Modules

fastvideo.configs.configs

Classes

fastvideo.configs.configs.DatasetType

Bases: str, Enum

Enumeration for different dataset types.

Functions
fastvideo.configs.configs.DatasetType.choices classmethod
choices() -> list[str]

Get all available choices as strings for argparse.

Source code in fastvideo/configs/configs.py
@classmethod
def choices(cls) -> list[str]:
    """Get all available choices as strings for argparse."""
    return [dataset_type.value for dataset_type in cls]
fastvideo.configs.configs.DatasetType.from_string classmethod
from_string(value: str) -> DatasetType

Convert string to DatasetType enum.

Source code in fastvideo/configs/configs.py
@classmethod
def from_string(cls, value: str) -> "DatasetType":
    """Convert string to DatasetType enum."""
    try:
        return cls(value.lower())
    except ValueError:
        raise ValueError(
            f"Invalid dataset type: {value}. Must be one of: {', '.join([m.value for m in cls])}"
        ) from None
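
Example: a minimal sketch of using these helpers together, assuming the HF member's value is the lowercase string 'hf' (consistent with the lower-casing in from_string):

from fastvideo.configs.configs import DatasetType

# choices() returns the enum values as plain strings, suitable for argparse's
# `choices=` parameter.
print(DatasetType.choices())

# from_string() lower-cases its input before lookup, so case does not matter;
# an unknown value raises ValueError listing the valid options.
assert DatasetType.from_string("HF") is DatasetType.HF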
fastvideo.configs.configs.PreprocessConfig dataclass
PreprocessConfig(model_path: str = '', dataset_path: str = '', dataset_type: DatasetType = HF, dataset_output_dir: str = './output', dataloader_num_workers: int = 1, preprocess_video_batch_size: int = 2, samples_per_file: int = 64, flush_frequency: int = 256, video_loader_type: VideoLoaderType = TORCHCODEC, max_height: int = 480, max_width: int = 848, num_frames: int = 163, video_length_tolerance_range: float = 2.0, train_fps: int = 30, speed_factor: float = 1.0, drop_short_ratio: float = 1.0, do_temporal_sample: bool = False, training_cfg_rate: float = 0.0, seed: int = 42)

Configuration for preprocessing operations.

Functions
fastvideo.configs.configs.PreprocessConfig.add_cli_args staticmethod
add_cli_args(parser: FlexibleArgumentParser, prefix: str = 'preprocess') -> FlexibleArgumentParser

Add preprocessing configuration arguments to the parser.

Source code in fastvideo/configs/configs.py
@staticmethod
def add_cli_args(parser: FlexibleArgumentParser,
                 prefix: str = "preprocess") -> FlexibleArgumentParser:
    """Add preprocessing configuration arguments to the parser."""
    prefix_with_dot = f"{prefix}." if (prefix.strip() != "") else ""

    preprocess_args = parser.add_argument_group("Preprocessing Arguments")
    # Model & Dataset
    preprocess_args.add_argument(f"--{prefix_with_dot}model-path",
                                 type=str,
                                 default=PreprocessConfig.model_path,
                                 help="Path to the model for preprocessing")
    preprocess_args.add_argument(
        f"--{prefix_with_dot}dataset-path",
        type=str,
        default=PreprocessConfig.dataset_path,
        help="Path to the dataset directory for preprocessing")
    preprocess_args.add_argument(
        f"--{prefix_with_dot}dataset-type",
        type=str,
        choices=DatasetType.choices(),
        default=PreprocessConfig.dataset_type.value,
        help="Type of the dataset")
    preprocess_args.add_argument(
        f"--{prefix_with_dot}dataset-output-dir",
        type=str,
        default=PreprocessConfig.dataset_output_dir,
        help="The output directory where the dataset will be written.")

    # Dataloader
    preprocess_args.add_argument(
        f"--{prefix_with_dot}dataloader-num-workers",
        type=int,
        default=PreprocessConfig.dataloader_num_workers,
        help=
        "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
    )
    preprocess_args.add_argument(
        f"--{prefix_with_dot}preprocess-video-batch-size",
        type=int,
        default=PreprocessConfig.preprocess_video_batch_size,
        help="Batch size (per device) for the training dataloader.")

    # Saver
    preprocess_args.add_argument(f"--{prefix_with_dot}samples-per-file",
                                 type=int,
                                 default=PreprocessConfig.samples_per_file,
                                 help="Number of samples per output file")
    preprocess_args.add_argument(f"--{prefix_with_dot}flush-frequency",
                                 type=int,
                                 default=PreprocessConfig.flush_frequency,
                                 help="How often to save to parquet files")

    # Video processing parameters
    preprocess_args.add_argument(
        f"--{prefix_with_dot}video-loader-type",
        type=str,
        choices=VideoLoaderType.choices(),
        default=PreprocessConfig.video_loader_type.value,
        help="Type of the video loader")
    preprocess_args.add_argument(f"--{prefix_with_dot}max-height",
                                 type=int,
                                 default=PreprocessConfig.max_height,
                                 help="Maximum height for video processing")
    preprocess_args.add_argument(f"--{prefix_with_dot}max-width",
                                 type=int,
                                 default=PreprocessConfig.max_width,
                                 help="Maximum width for video processing")
    preprocess_args.add_argument(f"--{prefix_with_dot}num-frames",
                                 type=int,
                                 default=PreprocessConfig.num_frames,
                                 help="Number of frames to process")
    preprocess_args.add_argument(
        f"--{prefix_with_dot}video-length-tolerance-range",
        type=float,
        default=PreprocessConfig.video_length_tolerance_range,
        help="Video length tolerance range")
    preprocess_args.add_argument(f"--{prefix_with_dot}train-fps",
                                 type=int,
                                 default=PreprocessConfig.train_fps,
                                 help="Training FPS")
    preprocess_args.add_argument(f"--{prefix_with_dot}speed-factor",
                                 type=float,
                                 default=PreprocessConfig.speed_factor,
                                 help="Speed factor for video processing")
    preprocess_args.add_argument(f"--{prefix_with_dot}drop-short-ratio",
                                 type=float,
                                 default=PreprocessConfig.drop_short_ratio,
                                 help="Ratio for dropping short videos")
    preprocess_args.add_argument(
        f"--{prefix_with_dot}do-temporal-sample",
        action=StoreBoolean,
        default=PreprocessConfig.do_temporal_sample,
        help="Whether to do temporal sampling")

    # Model Training configuration
    preprocess_args.add_argument(f"--{prefix_with_dot}training-cfg-rate",
                                 type=float,
                                 default=PreprocessConfig.training_cfg_rate,
                                 help="Training CFG rate")
    preprocess_args.add_argument(f"--{prefix_with_dot}seed",
                                 type=int,
                                 default=PreprocessConfig.seed,
                                 help="Seed for random number generator")

    return parser
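
Example: a hedged sketch of wiring these arguments into a parser; the FlexibleArgumentParser import path and the argument values below are assumptions for illustration:

from fastvideo.configs.configs import PreprocessConfig
from fastvideo.utils import FlexibleArgumentParser  # assumed import path

parser = FlexibleArgumentParser()
parser = PreprocessConfig.add_cli_args(parser, prefix="preprocess")

# With prefix="preprocess", every flag is namespaced with a dot:
args = parser.parse_args([
    "--preprocess.model-path", "path/to/model",      # hypothetical path
    "--preprocess.dataset-path", "path/to/dataset",  # hypothetical path
    "--preprocess.num-frames", "81",
])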
fastvideo.configs.configs.PreprocessConfig.from_kwargs classmethod
from_kwargs(kwargs: dict[str, Any]) -> Optional[PreprocessConfig]

Create PreprocessConfig from keyword arguments.

Source code in fastvideo/configs/configs.py
@classmethod
def from_kwargs(cls, kwargs: dict[str,
                                  Any]) -> Optional["PreprocessConfig"]:
    """Create PreprocessConfig from keyword arguments."""
    if 'dataset_type' in kwargs and isinstance(kwargs['dataset_type'], str):
        kwargs['dataset_type'] = DatasetType.from_string(
            kwargs['dataset_type'])
    if 'video_loader_type' in kwargs and isinstance(
            kwargs['video_loader_type'], str):
        kwargs['video_loader_type'] = VideoLoaderType.from_string(
            kwargs['video_loader_type'])

    preprocess_config = cls()
    if not update_config_from_args(
            preprocess_config, kwargs, prefix="preprocess", pop_args=True):
        return None
    return preprocess_config
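
Example: a minimal sketch of building a config from keyword arguments. The keys and values are illustrative; the exact key naming accepted by update_config_from_args follows that helper's conventions:

from fastvideo.configs.configs import PreprocessConfig

# String enum fields are converted internally via DatasetType.from_string /
# VideoLoaderType.from_string before the dataclass is populated.
config = PreprocessConfig.from_kwargs({
    "dataset_type": "hf",
    "model_path": "path/to/model",  # hypothetical path
    "num_frames": 81,
})

# from_kwargs returns None when update_config_from_args reports that no
# preprocessing arguments were applied.
if config is None:
    config = PreprocessConfig()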
fastvideo.configs.configs.VideoLoaderType

Bases: str, Enum

Enumeration for different video loaders.

Functions
fastvideo.configs.configs.VideoLoaderType.choices classmethod
choices() -> list[str]

Get all available choices as strings for argparse.

Source code in fastvideo/configs/configs.py
@classmethod
def choices(cls) -> list[str]:
    """Get all available choices as strings for argparse."""
    return [video_loader.value for video_loader in cls]
fastvideo.configs.configs.VideoLoaderType.from_string classmethod
from_string(value: str) -> VideoLoaderType

Convert string to VideoLoader enum.

Source code in fastvideo/configs/configs.py
@classmethod
def from_string(cls, value: str) -> "VideoLoaderType":
    """Convert string to VideoLoader enum."""
    try:
        return cls(value.lower())
    except ValueError:
        raise ValueError(
            f"Invalid video loader: {value}. Must be one of: {', '.join([m.value for m in cls])}"
        ) from None

Functions

fastvideo.configs.models

Classes

fastvideo.configs.models.DiTConfig dataclass
DiTConfig(arch_config: DiTArchConfig = DiTArchConfig(), prefix: str = '', quant_config: QuantizationConfig | None = None)

Bases: ModelConfig

Functions
fastvideo.configs.models.DiTConfig.add_cli_args staticmethod
add_cli_args(parser: Any, prefix: str = 'dit-config') -> Any

Add CLI arguments for DiTConfig fields

Source code in fastvideo/configs/models/dits/base.py
@staticmethod
def add_cli_args(parser: Any, prefix: str = "dit-config") -> Any:
    """Add CLI arguments for DiTConfig fields"""
    parser.add_argument(
        f"--{prefix}.prefix",
        type=str,
        dest=f"{prefix.replace('-', '_')}.prefix",
        default=DiTConfig.prefix,
        help="Prefix for the DiT model",
    )

    parser.add_argument(
        f"--{prefix}.quant-config",
        type=str,
        dest=f"{prefix.replace('-', '_')}.quant_config",
        default=None,
        help="Quantization configuration for the DiT model",
    )

    return parser
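
Example: a minimal sketch of the flags this adds, using a plain argparse parser (add_cli_args only calls add_argument, so any argparse-compatible parser works here); the "wan" value is purely illustrative:

import argparse

from fastvideo.configs.models import DiTConfig

parser = argparse.ArgumentParser()
parser = DiTConfig.add_cli_args(parser, prefix="dit-config")

# The dest keeps the dot so values can be routed back into the nested config.
args = parser.parse_args(["--dit-config.prefix", "wan"])
assert getattr(args, "dit_config.prefix") == "wan"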
fastvideo.configs.models.VAEConfig dataclass
VAEConfig(arch_config: VAEArchConfig = VAEArchConfig(), load_encoder: bool = True, load_decoder: bool = True, tile_sample_min_height: int = 256, tile_sample_min_width: int = 256, tile_sample_min_num_frames: int = 16, tile_sample_stride_height: int = 192, tile_sample_stride_width: int = 192, tile_sample_stride_num_frames: int = 12, blend_num_frames: int = 0, use_tiling: bool = True, use_temporal_tiling: bool = True, use_parallel_tiling: bool = True, use_temporal_scaling_frames: bool = True)

Bases: ModelConfig

Functions
fastvideo.configs.models.VAEConfig.add_cli_args staticmethod
add_cli_args(parser: Any, prefix: str = 'vae-config') -> Any

Add CLI arguments for VAEConfig fields

Source code in fastvideo/configs/models/vaes/base.py
@staticmethod
def add_cli_args(parser: Any, prefix: str = "vae-config") -> Any:
    """Add CLI arguments for VAEConfig fields"""
    parser.add_argument(
        f"--{prefix}.load-encoder",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.load_encoder",
        default=VAEConfig.load_encoder,
        help="Whether to load the VAE encoder",
    )
    parser.add_argument(
        f"--{prefix}.load-decoder",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.load_decoder",
        default=VAEConfig.load_decoder,
        help="Whether to load the VAE decoder",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-min-height",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_min_height",
        default=VAEConfig.tile_sample_min_height,
        help="Minimum height for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-min-width",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_min_width",
        default=VAEConfig.tile_sample_min_width,
        help="Minimum width for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-min-num-frames",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_min_num_frames",
        default=VAEConfig.tile_sample_min_num_frames,
        help="Minimum number of frames for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-stride-height",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_stride_height",
        default=VAEConfig.tile_sample_stride_height,
        help="Stride height for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-stride-width",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_stride_width",
        default=VAEConfig.tile_sample_stride_width,
        help="Stride width for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-stride-num-frames",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_stride_num_frames",
        default=VAEConfig.tile_sample_stride_num_frames,
        help="Stride number of frames for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.blend-num-frames",
        type=int,
        dest=f"{prefix.replace('-', '_')}.blend_num_frames",
        default=VAEConfig.blend_num_frames,
        help="Number of frames to blend for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.use-tiling",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.use_tiling",
        default=VAEConfig.use_tiling,
        help="Whether to use tiling for VAE",
    )
    parser.add_argument(
        f"--{prefix}.use-temporal-tiling",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.use_temporal_tiling",
        default=VAEConfig.use_temporal_tiling,
        help="Whether to use temporal tiling for VAE",
    )
    parser.add_argument(
        f"--{prefix}.use-parallel-tiling",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.use_parallel_tiling",
        default=VAEConfig.use_parallel_tiling,
        help="Whether to use parallel tiling for VAE",
    )

    return parser
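
Example: a minimal sketch of overriding tiling behaviour directly in code; field names come from the dataclass signature above, and the values are illustrative:

from fastvideo.configs.models import VAEConfig

vae_config = VAEConfig(
    use_tiling=True,
    use_temporal_tiling=False,   # spatial tiling only
    tile_sample_min_height=384,  # minimum tile height for VAE tile sampling
    tile_sample_min_width=384,   # minimum tile width for VAE tile sampling
)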

Modules

fastvideo.configs.models.audio
Modules
fastvideo.configs.models.audio.ltx2_audio_vae

LTX-2 audio VAE and vocoder configuration.

fastvideo.configs.models.dits
Classes
fastvideo.configs.models.dits.Cosmos25VideoConfig dataclass
Cosmos25VideoConfig(arch_config: DiTArchConfig = Cosmos25ArchConfig(), prefix: str = 'Cosmos25', quant_config: QuantizationConfig | None = None)

Bases: DiTConfig

Configuration for Cosmos 2.5 video generation model.

fastvideo.configs.models.dits.LTX2VideoConfig dataclass
LTX2VideoConfig(arch_config: DiTArchConfig = LTX2VideoArchConfig(), prefix: str = 'ltx2', quant_config: QuantizationConfig | None = None)

Bases: DiTConfig

Main configuration for LTX-2 transformer.

fastvideo.configs.models.dits.LongCatVideoConfig dataclass
LongCatVideoConfig(arch_config: DiTArchConfig = LongCatVideoArchConfig(), prefix: str = 'longcat', quant_config: QuantizationConfig | None = None)

Bases: DiTConfig

Main configuration for LongCat Video DiT.

Modules
fastvideo.configs.models.dits.base
Classes
fastvideo.configs.models.dits.base.DiTConfig dataclass
DiTConfig(arch_config: DiTArchConfig = DiTArchConfig(), prefix: str = '', quant_config: QuantizationConfig | None = None)

Bases: ModelConfig

Functions
fastvideo.configs.models.dits.base.DiTConfig.add_cli_args staticmethod
add_cli_args(parser: Any, prefix: str = 'dit-config') -> Any

Add CLI arguments for DiTConfig fields

Source code in fastvideo/configs/models/dits/base.py
@staticmethod
def add_cli_args(parser: Any, prefix: str = "dit-config") -> Any:
    """Add CLI arguments for DiTConfig fields"""
    parser.add_argument(
        f"--{prefix}.prefix",
        type=str,
        dest=f"{prefix.replace('-', '_')}.prefix",
        default=DiTConfig.prefix,
        help="Prefix for the DiT model",
    )

    parser.add_argument(
        f"--{prefix}.quant-config",
        type=str,
        dest=f"{prefix.replace('-', '_')}.quant_config",
        default=None,
        help="Quantization configuration for the DiT model",
    )

    return parser
fastvideo.configs.models.dits.cosmos2_5
Classes
fastvideo.configs.models.dits.cosmos2_5.Cosmos25ArchConfig dataclass
Cosmos25ArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), _fsdp_shard_conditions: list = (lambda: [is_transformer_blocks])(), _compile_conditions: list = list(), param_names_mapping: dict = (lambda: {'^net\\.x_embedder\\.proj\\.1\\.(.*)$': 'patch_embed.proj.\\1', '^net\\.t_embedder\\.1\\.linear_1\\.(.*)$': 'time_embed.t_embedder.linear_1.\\1', '^net\\.t_embedder\\.1\\.linear_2\\.(.*)$': 'time_embed.t_embedder.linear_2.\\1', '^net\\.t_embedding_norm\\.(.*)$': 'time_embed.norm.\\1', '^net\\.crossattn_proj\\.0\\.weight$': 'crossattn_proj.0.weight', '^net\\.crossattn_proj\\.0\\.bias$': 'crossattn_proj.0.bias', '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_q.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_k.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.v_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_v.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.output_proj\\.(.*)$': 'transformer_blocks.\\1.attn1.to_out.\\2', '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_norm\\.weight$': 'transformer_blocks.\\1.attn1.norm_q.weight', '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_norm\\.weight$': 'transformer_blocks.\\1.attn1.norm_k.weight', '^net\\.blocks\\.(\\d+)\\.self_attn\\.q_norm\\._extra_state$': 'transformer_blocks.\\1.attn1.norm_q._extra_state', '^net\\.blocks\\.(\\d+)\\.self_attn\\.k_norm\\._extra_state$': 'transformer_blocks.\\1.attn1.norm_k._extra_state', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_q.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_k.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.v_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_v.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.output_proj\\.(.*)$': 'transformer_blocks.\\1.attn2.to_out.\\2', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_norm\\.weight$': 'transformer_blocks.\\1.attn2.norm_q.weight', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_norm\\.weight$': 'transformer_blocks.\\1.attn2.norm_k.weight', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.q_norm\\._extra_state$': 'transformer_blocks.\\1.attn2.norm_q._extra_state', '^net\\.blocks\\.(\\d+)\\.cross_attn\\.k_norm\\._extra_state$': 'transformer_blocks.\\1.attn2.norm_k._extra_state', '^net\\.blocks\\.(\\d+)\\.mlp\\.layer1\\.(.*)$': 'transformer_blocks.\\1.mlp.fc_in.\\2', '^net\\.blocks\\.(\\d+)\\.mlp\\.layer2\\.(.*)$': 'transformer_blocks.\\1.mlp.fc_out.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_self_attn\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_self_attn.1.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_self_attn\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_self_attn.2.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_cross_attn\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_cross_attn.1.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_cross_attn\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_cross_attn.2.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_mlp\\.1\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_mlp.1.\\2', '^net\\.blocks\\.(\\d+)\\.adaln_modulation_mlp\\.2\\.(.*)$': 'transformer_blocks.\\1.adaln_modulation_mlp.2.\\2', '^net\\.blocks\\.(\\d+)\\.layer_norm_self_attn\\._extra_state$': 'transformer_blocks.\\1.norm1.norm._extra_state', '^net\\.blocks\\.(\\d+)\\.layer_norm_cross_attn\\._extra_state$': 'transformer_blocks.\\1.norm2.norm._extra_state', '^net\\.blocks\\.(\\d+)\\.layer_norm_mlp\\._extra_state$': 'transformer_blocks.\\1.norm3.norm._extra_state', 
'^net\\.final_layer\\.linear\\.(.*)$': 'final_layer.proj_out.\\1', '^net\\.final_layer\\.adaln_modulation\\.1\\.(.*)$': 'final_layer.linear_1.\\1', '^net\\.final_layer\\.adaln_modulation\\.2\\.(.*)$': 'final_layer.linear_2.\\1'})(), reverse_param_names_mapping: dict = dict(), lora_param_names_mapping: dict = (lambda: {'^transformer_blocks\\.(\\d+)\\.attn1\\.to_q\\.(.*)$': 'transformer_blocks.\\1.attn1.to_q.\\2', '^transformer_blocks\\.(\\d+)\\.attn1\\.to_k\\.(.*)$': 'transformer_blocks.\\1.attn1.to_k.\\2', '^transformer_blocks\\.(\\d+)\\.attn1\\.to_v\\.(.*)$': 'transformer_blocks.\\1.attn1.to_v.\\2', '^transformer_blocks\\.(\\d+)\\.attn1\\.to_out\\.(.*)$': 'transformer_blocks.\\1.attn1.to_out.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_q\\.(.*)$': 'transformer_blocks.\\1.attn2.to_q.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_k\\.(.*)$': 'transformer_blocks.\\1.attn2.to_k.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_v\\.(.*)$': 'transformer_blocks.\\1.attn2.to_v.\\2', '^transformer_blocks\\.(\\d+)\\.attn2\\.to_out\\.(.*)$': 'transformer_blocks.\\1.attn2.to_out.\\2', '^transformer_blocks\\.(\\d+)\\.mlp\\.(.*)$': 'transformer_blocks.\\1.mlp.\\2'})(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (SLIDING_TILE_ATTN, SAGE_ATTN, FLASH_ATTN, TORCH_SDPA, VIDEO_SPARSE_ATTN, VMOBA_ATTN, SAGE_ATTN_THREE, SLA_ATTN, SAGE_SLA_ATTN), hidden_size: int = 0, num_attention_heads: int = 16, num_channels_latents: int = 0, in_channels: int = 16, out_channels: int = 16, exclude_lora_layers: list[str] = (lambda: ['embedder'])(), boundary_ratio: float | None = None, attention_head_dim: int = 128, num_layers: int = 28, mlp_ratio: float = 4.0, text_embed_dim: int = 1024, adaln_lora_dim: int = 256, use_adaln_lora: bool = True, max_size: tuple[int, int, int] = (128, 240, 240), patch_size: tuple[int, int, int] = (1, 2, 2), rope_scale: tuple[float, float, float] = (1.0, 3.0, 3.0), concat_padding_mask: bool = True, extra_pos_embed_type: str | None = None, use_crossattn_projection: bool = False, crossattn_proj_in_channels: int = 100352, rope_enable_fps_modulation: bool = True, qk_norm: str = 'rms_norm', eps: float = 1e-06)

Bases: DiTArchConfig

Configuration for Cosmos 2.5 architecture (MiniTrainDIT).

fastvideo.configs.models.dits.cosmos2_5.Cosmos25VideoConfig dataclass
Cosmos25VideoConfig(arch_config: DiTArchConfig = Cosmos25ArchConfig(), prefix: str = 'Cosmos25', quant_config: QuantizationConfig | None = None)

Bases: DiTConfig

Configuration for Cosmos 2.5 video generation model.

fastvideo.configs.models.dits.longcat

LongCat Video DiT configuration for native FastVideo implementation.

Classes
fastvideo.configs.models.dits.longcat.LongCatVideoArchConfig dataclass
LongCatVideoArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), _fsdp_shard_conditions: list = (lambda: [is_longcat_blocks])(), _compile_conditions: list = (lambda: [is_longcat_blocks])(), param_names_mapping: dict = (lambda: {'^x_embedder\\.(.*)$': 'patch_embed.\\1', '^t_embedder\\.mlp\\.0\\.(.*)$': 'time_embedder.linear_1.\\1', '^t_embedder\\.mlp\\.2\\.(.*)$': 'time_embedder.linear_2.\\1', '^y_embedder\\.y_proj\\.0\\.(.*)$': 'caption_embedder.linear_1.\\1', '^y_embedder\\.y_proj\\.2\\.(.*)$': 'caption_embedder.linear_2.\\1', '^blocks\\.(\\d+)\\.adaLN_modulation\\.1\\.(.*)$': 'blocks.\\1.adaln_linear_1.\\2', '^blocks\\.(\\d+)\\.mod_norm_attn\\.(.*)$': 'blocks.\\1.norm_attn.\\2', '^blocks\\.(\\d+)\\.mod_norm_ffn\\.(.*)$': 'blocks.\\1.norm_ffn.\\2', '^blocks\\.(\\d+)\\.pre_crs_attn_norm\\.(.*)$': 'blocks.\\1.norm_cross.\\2', '^blocks\\.(\\d+)\\.attn\\.qkv\\.(.*)$': 'blocks.\\1.self_attn.qkv_fused.\\2', '^blocks\\.(\\d+)\\.attn\\.proj\\.(.*)$': 'blocks.\\1.self_attn.to_out.\\2', '^blocks\\.(\\d+)\\.attn\\.q_norm\\.(.*)$': 'blocks.\\1.self_attn.q_norm.\\2', '^blocks\\.(\\d+)\\.attn\\.k_norm\\.(.*)$': 'blocks.\\1.self_attn.k_norm.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.q_linear\\.(.*)$': 'blocks.\\1.cross_attn.to_q.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.kv_linear\\.(.*)$': 'blocks.\\1.cross_attn.kv_fused.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.proj\\.(.*)$': 'blocks.\\1.cross_attn.to_out.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.q_norm\\.(.*)$': 'blocks.\\1.cross_attn.q_norm.\\2', '^blocks\\.(\\d+)\\.cross_attn\\.k_norm\\.(.*)$': 'blocks.\\1.cross_attn.k_norm.\\2', '^blocks\\.(\\d+)\\.ffn\\.w1\\.(.*)$': 'blocks.\\1.ffn.w1.\\2', '^blocks\\.(\\d+)\\.ffn\\.w2\\.(.*)$': 'blocks.\\1.ffn.w2.\\2', '^blocks\\.(\\d+)\\.ffn\\.w3\\.(.*)$': 'blocks.\\1.ffn.w3.\\2', '^final_layer\\.adaLN_modulation\\.1\\.(.*)$': 'final_layer.adaln_linear.\\1', '^final_layer\\.norm_final\\.(.*)$': 'final_layer.norm.\\1', '^final_layer\\.linear\\.(.*)$': 'final_layer.proj.\\1'})(), reverse_param_names_mapping: dict = (lambda: {})(), lora_param_names_mapping: dict = (lambda: {})(), _supported_attention_backends: tuple = (lambda: (FLASH_ATTN, TORCH_SDPA))(), hidden_size: int = 4096, num_attention_heads: int = 32, num_channels_latents: int = 16, in_channels: int = 16, out_channels: int = 16, exclude_lora_layers: list[str] = (lambda: [])(), boundary_ratio: float | None = None, depth: int = 48, attention_head_dim: int = 128, patch_size: tuple[int, int, int] = (1, 2, 2), caption_channels: int = 4096, adaln_tembed_dim: int = 512, frequency_embedding_size: int = 256, mlp_ratio: int = 4, text_tokens_zero_pad: bool = True, enable_bsa: bool = False, bsa_params: dict | None = (lambda: {'sparsity': 0.9375, 'cdf_threshold': None, 'chunk_3d_shape_q': [4, 4, 4], 'chunk_3d_shape_k': [4, 4, 4]})())

Bases: DiTArchConfig

Architecture configuration for native LongCat Video DiT.

fastvideo.configs.models.dits.longcat.LongCatVideoConfig dataclass
LongCatVideoConfig(arch_config: DiTArchConfig = LongCatVideoArchConfig(), prefix: str = 'longcat', quant_config: QuantizationConfig | None = None)

Bases: DiTConfig

Main configuration for LongCat Video DiT.

Functions
fastvideo.configs.models.dits.longcat.is_longcat_blocks
is_longcat_blocks(n: str, m) -> bool

FSDP shard condition for LongCat transformer blocks.

Source code in fastvideo/configs/models/dits/longcat.py
def is_longcat_blocks(n: str, m) -> bool:
    """FSDP shard condition for LongCat transformer blocks."""
    return "blocks" in n and str.isdigit(n.split(".")[-1])
fastvideo.configs.models.dits.ltx2

LTX-2 Transformer configuration for native FastVideo integration.

Classes
fastvideo.configs.models.dits.ltx2.LTX2VideoArchConfig dataclass
LTX2VideoArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), _fsdp_shard_conditions: list = (lambda: [is_ltx2_blocks])(), _compile_conditions: list = (lambda: [is_ltx2_blocks])(), param_names_mapping: dict = (lambda: {'^model\\.diffusion_model\\.(.*)$': 'model.\\1', '^diffusion_model\\.(.*)$': 'model.\\1', '^model\\.(.*)$': 'model.\\1', '^(.*)$': 'model.\\1'})(), reverse_param_names_mapping: dict = (lambda: {})(), lora_param_names_mapping: dict = (lambda: {})(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (SLIDING_TILE_ATTN, SAGE_ATTN, FLASH_ATTN, TORCH_SDPA, VIDEO_SPARSE_ATTN, VMOBA_ATTN, SAGE_ATTN_THREE, SLA_ATTN, SAGE_SLA_ATTN), hidden_size: int = 0, num_attention_heads: int = 32, num_channels_latents: int = 128, in_channels: int | None = None, out_channels: int | None = None, exclude_lora_layers: list[str] = list(), boundary_ratio: float | None = None, attention_head_dim: int = 128, num_layers: int = 48, cross_attention_dim: int = 4096, caption_channels: int = 3840, norm_eps: float = 1e-06, attention_type: str = 'default', rope_type: str = 'split', double_precision_rope: bool = True, positional_embedding_theta: float = 10000.0, positional_embedding_max_pos: list[int] = (lambda: [20, 2048, 2048])(), timestep_scale_multiplier: int = 1000, use_middle_indices_grid: bool = True, patch_size: tuple[int, int, int] = (1, 1, 1), audio_num_attention_heads: int = 32, audio_attention_head_dim: int = 64, audio_in_channels: int = 128, audio_out_channels: int = 128, audio_cross_attention_dim: int = 2048, audio_positional_embedding_max_pos: list[int] = (lambda: [20])(), av_ca_timestep_scale_multiplier: int = 1)

Bases: DiTArchConfig

Architecture configuration for LTX-2 video transformer.

fastvideo.configs.models.dits.ltx2.LTX2VideoConfig dataclass
LTX2VideoConfig(arch_config: DiTArchConfig = LTX2VideoArchConfig(), prefix: str = 'ltx2', quant_config: QuantizationConfig | None = None)

Bases: DiTConfig

Main configuration for LTX-2 transformer.

Functions
fastvideo.configs.models.dits.ltx2.is_ltx2_blocks
is_ltx2_blocks(name: str, _module) -> bool

FSDP shard condition for LTX-2 transformer blocks.

Source code in fastvideo/configs/models/dits/ltx2.py
def is_ltx2_blocks(name: str, _module) -> bool:
    """FSDP shard condition for LTX-2 transformer blocks."""
    return "transformer_blocks" in name
fastvideo.configs.models.encoders
Classes
fastvideo.configs.models.encoders.Reason1ArchConfig dataclass
Reason1ArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), architectures: list[str] = (lambda: ['Qwen2_5_VLForConditionalGeneration'])(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (FLASH_ATTN, TORCH_SDPA), output_hidden_states: bool = True, use_return_dict: bool = True, vocab_size: int = 152064, hidden_size: int = 3584, num_hidden_layers: int = 28, num_attention_heads: int = 28, pad_token_id: int = 151643, eos_token_id: int = 151645, text_len: int = 512, hidden_state_skip_layer: int = 0, decoder_start_token_id: int = 0, output_past: bool = True, scalable_attention: bool = True, tie_word_embeddings: bool = False, tokenizer_kwargs: dict[str, Any] = dict(), _fsdp_shard_conditions: list = (lambda: [])(), model_type: str = 'qwen2_5_vl', num_key_value_heads: int = 4, intermediate_size: int = 18944, bos_token_id: int = 151643, image_token_id: int = 151655, video_token_id: int = 151656, vision_token_id: int = 151654, vision_start_token_id: int = 151652, vision_end_token_id: int = 151653, vision_config: dict[str, Any] | None = None, rope_theta: float = 1000000.0, rope_scaling: dict[str, Any] | None = (lambda: {'type': 'mrope', 'mrope_section': [16, 24, 24]})(), max_position_embeddings: int = 128000, max_window_layers: int = 28, embedding_concat_strategy: str = 'mean_pooling', n_layers_per_group: int = 5, num_embedding_padding_tokens: int = 512, attention_dropout: float = 0.0, hidden_act: str = 'silu', initializer_range: float = 0.02, rms_norm_eps: float = 1e-06, use_sliding_window: bool = False, sliding_window: int = 32768, use_cache: bool = False, torch_dtype: str = 'bfloat16', _attn_implementation: str = 'flash_attention_2')

Bases: TextEncoderArchConfig

Architecture settings (defaults match Qwen2.5-VL-7B-Instruct).

fastvideo.configs.models.encoders.Reason1Config dataclass
Reason1Config(arch_config: Reason1ArchConfig = Reason1ArchConfig(), prefix: str = '', quant_config: QuantizationConfig | None = None, lora_config: Any | None = None, is_chat_model: bool = False, tokenizer_type: str = 'Qwen/Qwen2.5-VL-7B-Instruct')

Bases: TextEncoderConfig

Reason1 text encoder config.

fastvideo.configs.models.encoders.SiglipVisionConfig dataclass
SiglipVisionConfig(arch_config: ImageEncoderArchConfig = SiglipVisionArchConfig(), prefix: str = 'siglip', quant_config: QuantizationConfig | None = None, lora_config: Any | None = None, num_hidden_layers_override: int | None = None, require_post_norm: bool | None = None, enable_scale: bool = True, is_causal: bool = False)

Bases: ImageEncoderConfig

Configuration for SigLIP vision encoder.

fastvideo.configs.models.encoders.T5LargeConfig dataclass
T5LargeConfig(arch_config: TextEncoderArchConfig = T5LargeArchConfig(), prefix: str = 't5', quant_config: QuantizationConfig | None = None, lora_config: Any | None = None, is_chat_model: bool = False)

Bases: TextEncoderConfig

T5 Large configuration for your specific model.

Modules
fastvideo.configs.models.encoders.reason1

Config for Reason1 (Qwen2.5-VL) text encoder.

Classes
fastvideo.configs.models.encoders.reason1.Reason1ArchConfig dataclass
Reason1ArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), architectures: list[str] = (lambda: ['Qwen2_5_VLForConditionalGeneration'])(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (FLASH_ATTN, TORCH_SDPA), output_hidden_states: bool = True, use_return_dict: bool = True, vocab_size: int = 152064, hidden_size: int = 3584, num_hidden_layers: int = 28, num_attention_heads: int = 28, pad_token_id: int = 151643, eos_token_id: int = 151645, text_len: int = 512, hidden_state_skip_layer: int = 0, decoder_start_token_id: int = 0, output_past: bool = True, scalable_attention: bool = True, tie_word_embeddings: bool = False, tokenizer_kwargs: dict[str, Any] = dict(), _fsdp_shard_conditions: list = (lambda: [])(), model_type: str = 'qwen2_5_vl', num_key_value_heads: int = 4, intermediate_size: int = 18944, bos_token_id: int = 151643, image_token_id: int = 151655, video_token_id: int = 151656, vision_token_id: int = 151654, vision_start_token_id: int = 151652, vision_end_token_id: int = 151653, vision_config: dict[str, Any] | None = None, rope_theta: float = 1000000.0, rope_scaling: dict[str, Any] | None = (lambda: {'type': 'mrope', 'mrope_section': [16, 24, 24]})(), max_position_embeddings: int = 128000, max_window_layers: int = 28, embedding_concat_strategy: str = 'mean_pooling', n_layers_per_group: int = 5, num_embedding_padding_tokens: int = 512, attention_dropout: float = 0.0, hidden_act: str = 'silu', initializer_range: float = 0.02, rms_norm_eps: float = 1e-06, use_sliding_window: bool = False, sliding_window: int = 32768, use_cache: bool = False, torch_dtype: str = 'bfloat16', _attn_implementation: str = 'flash_attention_2')

Bases: TextEncoderArchConfig

Architecture settings (defaults match Qwen2.5-VL-7B-Instruct).

fastvideo.configs.models.encoders.reason1.Reason1Config dataclass
Reason1Config(arch_config: Reason1ArchConfig = Reason1ArchConfig(), prefix: str = '', quant_config: QuantizationConfig | None = None, lora_config: Any | None = None, is_chat_model: bool = False, tokenizer_type: str = 'Qwen/Qwen2.5-VL-7B-Instruct')

Bases: TextEncoderConfig

Reason1 text encoder config.

fastvideo.configs.models.encoders.siglip

SigLIP vision encoder configuration for FastVideo.

Classes
fastvideo.configs.models.encoders.siglip.SiglipVisionArchConfig dataclass
SiglipVisionArchConfig(stacked_params_mapping: list = (lambda: [('qkv_proj', 'q_proj', 'q'), ('qkv_proj', 'k_proj', 'k'), ('qkv_proj', 'v_proj', 'v')])(), architectures: list[str] = (lambda: ['SiglipVisionModel'])(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (FLASH_ATTN, TORCH_SDPA), output_hidden_states: bool = False, use_return_dict: bool = True, attention_dropout: float = 0.0, dtype: str | None = None, hidden_act: str = 'gelu_pytorch_tanh', hidden_size: int = 1152, image_size: int = 384, intermediate_size: int = 4304, layer_norm_eps: float = 1e-06, model_type: str = 'siglip_vision_model', num_attention_heads: int = 16, num_channels: int = 3, num_hidden_layers: int = 27, patch_size: int = 14)

Bases: ImageEncoderArchConfig

Architecture configuration for SigLIP vision encoder.

Fields match the config.json from HuggingFace SigLIP checkpoints.

fastvideo.configs.models.encoders.siglip.SiglipVisionConfig dataclass
SiglipVisionConfig(arch_config: ImageEncoderArchConfig = SiglipVisionArchConfig(), prefix: str = 'siglip', quant_config: QuantizationConfig | None = None, lora_config: Any | None = None, num_hidden_layers_override: int | None = None, require_post_norm: bool | None = None, enable_scale: bool = True, is_causal: bool = False)

Bases: ImageEncoderConfig

Configuration for SigLIP vision encoder.

fastvideo.configs.models.encoders.t5
Classes
fastvideo.configs.models.encoders.t5.T5LargeArchConfig dataclass
T5LargeArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = (lambda: [('.qkv_proj', '.q', 'q'), ('.qkv_proj', '.k', 'k'), ('.qkv_proj', '.v', 'v')])(), architectures: list[str] = (lambda: [])(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (FLASH_ATTN, TORCH_SDPA), output_hidden_states: bool = False, use_return_dict: bool = True, vocab_size: int = 32128, hidden_size: int = 0, num_hidden_layers: int = 0, num_attention_heads: int = 0, pad_token_id: int = 0, eos_token_id: int = 1, text_len: int = 512, hidden_state_skip_layer: int = 0, decoder_start_token_id: int = 0, output_past: bool = True, scalable_attention: bool = True, tie_word_embeddings: bool = False, tokenizer_kwargs: dict[str, Any] = dict(), _fsdp_shard_conditions: list = (lambda: [_is_transformer_layer, _is_embeddings, _is_final_layernorm])(), d_model: int = 1024, d_kv: int = 128, d_ff: int = 65536, num_layers: int = 24, num_decoder_layers: int | None = 24, num_heads: int = 128, relative_attention_num_buckets: int = 32, relative_attention_max_distance: int = 128, dropout_rate: float = 0.1, layer_norm_epsilon: float = 1e-06, initializer_factor: float = 1.0, feed_forward_proj: str = 'relu', dense_act_fn: str = '', is_gated_act: bool = False, is_encoder_decoder: bool = True, use_cache: bool = True, classifier_dropout: float = 0.0, dtype: str | None = None, gradient_checkpointing: bool = False, n_positions: int = 512, task_specific_params: dict | None = None)

Bases: T5ArchConfig

T5 Large architecture config with parameters for your specific model.

fastvideo.configs.models.encoders.t5.T5LargeConfig dataclass
T5LargeConfig(arch_config: TextEncoderArchConfig = T5LargeArchConfig(), prefix: str = 't5', quant_config: QuantizationConfig | None = None, lora_config: Any | None = None, is_chat_model: bool = False)

Bases: TextEncoderConfig

T5 Large configuration for your specific model.

fastvideo.configs.models.vaes
Classes
fastvideo.configs.models.vaes.Cosmos25VAEConfig dataclass
Cosmos25VAEConfig(arch_config: Cosmos25VAEArchConfig = Cosmos25VAEArchConfig(), load_encoder: bool = True, load_decoder: bool = True, tile_sample_min_height: int = 256, tile_sample_min_width: int = 256, tile_sample_min_num_frames: int = 16, tile_sample_stride_height: int = 192, tile_sample_stride_width: int = 192, tile_sample_stride_num_frames: int = 12, blend_num_frames: int = 0, use_tiling: bool = False, use_temporal_tiling: bool = False, use_parallel_tiling: bool = False, use_temporal_scaling_frames: bool = True, use_feature_cache: bool = True)

Bases: VAEConfig

Cosmos2.5 VAE config.

Modules
fastvideo.configs.models.vaes.base
Classes
fastvideo.configs.models.vaes.base.VAEConfig dataclass
VAEConfig(arch_config: VAEArchConfig = VAEArchConfig(), load_encoder: bool = True, load_decoder: bool = True, tile_sample_min_height: int = 256, tile_sample_min_width: int = 256, tile_sample_min_num_frames: int = 16, tile_sample_stride_height: int = 192, tile_sample_stride_width: int = 192, tile_sample_stride_num_frames: int = 12, blend_num_frames: int = 0, use_tiling: bool = True, use_temporal_tiling: bool = True, use_parallel_tiling: bool = True, use_temporal_scaling_frames: bool = True)

Bases: ModelConfig

Functions
fastvideo.configs.models.vaes.base.VAEConfig.add_cli_args staticmethod
add_cli_args(parser: Any, prefix: str = 'vae-config') -> Any

Add CLI arguments for VAEConfig fields

Source code in fastvideo/configs/models/vaes/base.py
@staticmethod
def add_cli_args(parser: Any, prefix: str = "vae-config") -> Any:
    """Add CLI arguments for VAEConfig fields"""
    parser.add_argument(
        f"--{prefix}.load-encoder",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.load_encoder",
        default=VAEConfig.load_encoder,
        help="Whether to load the VAE encoder",
    )
    parser.add_argument(
        f"--{prefix}.load-decoder",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.load_decoder",
        default=VAEConfig.load_decoder,
        help="Whether to load the VAE decoder",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-min-height",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_min_height",
        default=VAEConfig.tile_sample_min_height,
        help="Minimum height for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-min-width",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_min_width",
        default=VAEConfig.tile_sample_min_width,
        help="Minimum width for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-min-num-frames",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_min_num_frames",
        default=VAEConfig.tile_sample_min_num_frames,
        help="Minimum number of frames for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-stride-height",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_stride_height",
        default=VAEConfig.tile_sample_stride_height,
        help="Stride height for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-stride-width",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_stride_width",
        default=VAEConfig.tile_sample_stride_width,
        help="Stride width for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.tile-sample-stride-num-frames",
        type=int,
        dest=f"{prefix.replace('-', '_')}.tile_sample_stride_num_frames",
        default=VAEConfig.tile_sample_stride_num_frames,
        help="Stride number of frames for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.blend-num-frames",
        type=int,
        dest=f"{prefix.replace('-', '_')}.blend_num_frames",
        default=VAEConfig.blend_num_frames,
        help="Number of frames to blend for VAE tile sampling",
    )
    parser.add_argument(
        f"--{prefix}.use-tiling",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.use_tiling",
        default=VAEConfig.use_tiling,
        help="Whether to use tiling for VAE",
    )
    parser.add_argument(
        f"--{prefix}.use-temporal-tiling",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.use_temporal_tiling",
        default=VAEConfig.use_temporal_tiling,
        help="Whether to use temporal tiling for VAE",
    )
    parser.add_argument(
        f"--{prefix}.use-parallel-tiling",
        action=StoreBoolean,
        dest=f"{prefix.replace('-', '_')}.use_parallel_tiling",
        default=VAEConfig.use_parallel_tiling,
        help="Whether to use parallel tiling for VAE",
    )

    return parser
fastvideo.configs.models.vaes.cosmos2_5vae

Cosmos 2.5 (Wan2.1-style) VAE config and checkpoint-key mapping.

Classes
fastvideo.configs.models.vaes.cosmos2_5vae.Cosmos25VAEArchConfig dataclass
Cosmos25VAEArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), scaling_factor: float | Tensor = 0, temporal_compression_ratio: int = 4, spatial_compression_ratio: int = 8, _name_or_path: str = '', base_dim: int = 96, decoder_base_dim: int | None = None, z_dim: int = 16, dim_mult: tuple[int, ...] = (1, 2, 4, 4), num_res_blocks: int = 2, attn_scales: tuple[float, ...] = (), temperal_downsample: tuple[bool, ...] = (False, True, True), dropout: float = 0.0, is_residual: bool = False, in_channels: int = 3, out_channels: int = 3, patch_size: int | None = None, scale_factor_temporal: int = 4, scale_factor_spatial: int = 8, clip_output: bool = True, latents_mean: tuple[float, ...] = (-0.7571, -0.7089, -0.9113, 0.1075, -0.1745, 0.9653, -0.1517, 1.5508, 0.4134, -0.0715, 0.5517, -0.3632, -0.1922, -0.9497, 0.2503, -0.2921), latents_std: tuple[float, ...] = (2.8184, 1.4541, 2.3275, 2.6558, 1.2196, 1.7708, 2.6052, 2.0743, 3.2687, 2.1526, 2.8652, 1.5579, 1.6382, 1.1253, 2.8251, 1.916), param_names_mapping: dict[str, str] = (lambda: {'^conv1\\.(.*)$': 'quant_conv.\\1', '^conv2\\.(.*)$': 'post_quant_conv.\\1', '^encoder\\.conv1\\.(.*)$': 'encoder.conv_in.\\1', '^decoder\\.conv1\\.(.*)$': 'decoder.conv_in.\\1', '^encoder\\.head\\.0\\.gamma$': 'encoder.norm_out.gamma', '^encoder\\.head\\.2\\.(.*)$': 'encoder.conv_out.\\1', '^decoder\\.head\\.0\\.gamma$': 'decoder.norm_out.gamma', '^decoder\\.head\\.2\\.(.*)$': 'decoder.conv_out.\\1'})())

Bases: VAEArchConfig

Functions
fastvideo.configs.models.vaes.cosmos2_5vae.Cosmos25VAEArchConfig.map_official_key staticmethod
map_official_key(key: str) -> str | None

Map a single official checkpoint key into FastVideo key space.

Source code in fastvideo/configs/models/vaes/cosmos2_5vae.py
@staticmethod
def map_official_key(key: str) -> str | None:
    """Map a single official checkpoint key into FastVideo key space."""

    def map_residual_subkey(prefix: str, sub: str) -> str | None:
        if re.match(r"^residual\.0\.gamma$", sub):
            return f"{prefix}.norm1.gamma"
        m = re.match(r"^residual\.2\.(weight|bias)$", sub)
        if m:
            return f"{prefix}.conv1.{m.group(1)}"
        if re.match(r"^residual\.3\.gamma$", sub):
            return f"{prefix}.norm2.gamma"
        m = re.match(r"^residual\.6\.(weight|bias)$", sub)
        if m:
            return f"{prefix}.conv2.{m.group(1)}"
        m = re.match(r"^shortcut\.(weight|bias)$", sub)
        if m:
            return f"{prefix}.conv_shortcut.{m.group(1)}"
        return None

    def map_attn_subkey(prefix: str, sub: str) -> str | None:
        if re.match(r"^norm\.gamma$", sub):
            return f"{prefix}.norm.gamma"
        m = re.match(r"^to_qkv\.(weight|bias)$", sub)
        if m:
            return f"{prefix}.to_qkv.{m.group(1)}"
        m = re.match(r"^proj\.(weight|bias)$", sub)
        if m:
            return f"{prefix}.proj.{m.group(1)}"
        return None

    def map_resample_subkey(prefix: str, sub: str) -> str | None:
        m = re.match(r"^resample\.1\.(weight|bias)$", sub)
        if m:
            return f"{prefix}.resample.1.{m.group(1)}"
        m = re.match(r"^time_conv\.(weight|bias)$", sub)
        if m:
            return f"{prefix}.time_conv.{m.group(1)}"
        return None

    m = re.match(r"^conv1\.(weight|bias)$", key)
    if m:
        return f"quant_conv.{m.group(1)}"
    m = re.match(r"^conv2\.(weight|bias)$", key)
    if m:
        return f"post_quant_conv.{m.group(1)}"
    m = re.match(r"^(encoder|decoder)\.conv1\.(weight|bias)$", key)
    if m:
        return f"{m.group(1)}.conv_in.{m.group(2)}"
    m = re.match(r"^(encoder|decoder)\.head\.0\.gamma$", key)
    if m:
        return f"{m.group(1)}.norm_out.gamma"
    m = re.match(r"^(encoder|decoder)\.head\.2\.(weight|bias)$", key)
    if m:
        return f"{m.group(1)}.conv_out.{m.group(2)}"

    m = re.match(r"^(encoder|decoder)\.middle\.0\.(.*)$", key)
    if m:
        return map_residual_subkey(f"{m.group(1)}.mid_block.resnets.0",
                                   m.group(2))
    m = re.match(r"^(encoder|decoder)\.middle\.1\.(.*)$", key)
    if m:
        return map_attn_subkey(f"{m.group(1)}.mid_block.attentions.0",
                               m.group(2))
    m = re.match(r"^(encoder|decoder)\.middle\.2\.(.*)$", key)
    if m:
        return map_residual_subkey(f"{m.group(1)}.mid_block.resnets.1",
                                   m.group(2))

    m = re.match(r"^encoder\.downsamples\.(\d+)\.(.*)$", key)
    if m:
        idx = int(m.group(1))
        sub = m.group(2)
        if sub.startswith("residual.") or sub.startswith("shortcut."):
            return map_residual_subkey(f"encoder.down_blocks.{idx}", sub)
        if sub.startswith("resample.") or sub.startswith("time_conv."):
            return map_resample_subkey(f"encoder.down_blocks.{idx}", sub)
        return None

    m = re.match(r"^decoder\.upsamples\.(\d+)\.(.*)$", key)
    if m:
        uidx = int(m.group(1))
        sub = m.group(2)

        if uidx in (0, 1, 2):
            block_i, res_i = 0, uidx
        elif uidx == 3:
            block_i, res_i = 0, None
        elif uidx in (4, 5, 6):
            block_i, res_i = 1, uidx - 4
        elif uidx == 7:
            block_i, res_i = 1, None
        elif uidx in (8, 9, 10):
            block_i, res_i = 2, uidx - 8
        elif uidx == 11:
            block_i, res_i = 2, None
        elif uidx in (12, 13, 14):
            block_i, res_i = 3, uidx - 12
        else:
            return None

        if res_i is None:
            return map_resample_subkey(
                f"decoder.up_blocks.{block_i}.upsamplers.0",
                sub,
            )

        return map_residual_subkey(
            f"decoder.up_blocks.{block_i}.resnets.{res_i}",
            sub,
        )

    return None
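
Example: a few representative key translations performed by this mapper, taken from the patterns handled in the source above:

from fastvideo.configs.models.vaes.cosmos2_5vae import Cosmos25VAEArchConfig

mapper = Cosmos25VAEArchConfig.map_official_key
assert mapper("conv1.weight") == "quant_conv.weight"
assert mapper("encoder.head.2.bias") == "encoder.conv_out.bias"
# Decoder upsample index 3 is the first upsampler, not a resnet block.
assert (mapper("decoder.upsamples.3.resample.1.weight")
        == "decoder.up_blocks.0.upsamplers.0.resample.1.weight")
# Keys with no FastVideo counterpart map to None and can be skipped.
assert mapper("some.unknown.key") is None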
fastvideo.configs.models.vaes.cosmos2_5vae.Cosmos25VAEConfig dataclass
Cosmos25VAEConfig(arch_config: Cosmos25VAEArchConfig = Cosmos25VAEArchConfig(), load_encoder: bool = True, load_decoder: bool = True, tile_sample_min_height: int = 256, tile_sample_min_width: int = 256, tile_sample_min_num_frames: int = 16, tile_sample_stride_height: int = 192, tile_sample_stride_width: int = 192, tile_sample_stride_num_frames: int = 12, blend_num_frames: int = 0, use_tiling: bool = False, use_temporal_tiling: bool = False, use_parallel_tiling: bool = False, use_temporal_scaling_frames: bool = True, use_feature_cache: bool = True)

Bases: VAEConfig

Cosmos2.5 VAE config.

fastvideo.configs.models.vaes.ltx2vae

LTX-2 VAE configuration.

Classes

fastvideo.configs.pipelines

Classes

fastvideo.configs.pipelines.Cosmos25Config dataclass
Cosmos25Config(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 0.0, flow_shift: float = 5.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = (lambda: Cosmos25VideoConfig(arch_config=Cosmos25ArchConfig(num_attention_heads=16, attention_head_dim=128, in_channels=16, out_channels=16, num_layers=28, patch_size=[1, 2, 2], max_size=[128, 240, 240], rope_scale=[1.0, 3.0, 3.0], text_embed_dim=1024, mlp_ratio=4.0, adaln_lora_dim=256, use_adaln_lora=True, concat_padding_mask=True, extra_pos_embed_type=None, use_crossattn_projection=True, rope_enable_fps_modulation=False, qk_norm='rms_norm')))(), dit_precision: str = 'bf16', vae_config: VAEConfig = Cosmos25VAEConfig(), vae_precision: str = 'bf16', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (Reason1Config(arch_config=Reason1ArchConfig(embedding_concat_strategy='full_concat')),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('bf16',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (_identity_preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (reason1_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = NONE, skip_time_steps: int = 0, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None)

Bases: PipelineConfig

Configuration for Cosmos 2.5 (Predict2.5) video generation pipeline.

fastvideo.configs.pipelines.CosmosConfig dataclass
CosmosConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: int = 6, flow_shift: float = 1.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = CosmosVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = CosmosVAEConfig(), vae_precision: str = 'fp16', vae_tiling: bool = True, vae_sp: bool = True, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (T5LargeConfig(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('bf16',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (t5_large_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, conditioning_strategy: str = 'frame_replace', min_num_conditional_frames: int = 1, max_num_conditional_frames: int = 2, sigma_conditional: float = 0.0001, sigma_data: float = 1.0, state_ch: int = 16, state_t: int = 24, text_encoder_class: str = 'T5')

Bases: PipelineConfig

Configuration for Cosmos2 Video2World pipeline matching diffusers.

fastvideo.configs.pipelines.FastHunyuanConfig dataclass
FastHunyuanConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: int = 6, flow_shift: int = 17, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = HunyuanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = HunyuanVAEConfig(), vae_precision: str = 'fp16', vae_tiling: bool = True, vae_sp: bool = True, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (LlamaConfig(), CLIPTextConfig()))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp16', 'fp16'))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (llama_preprocess_text, clip_preprocess_text))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], tensor], ...] = (lambda: (llama_postprocess_text, clip_postprocess_text))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None)

Bases: HunyuanConfig

Configuration specifically optimized for FastHunyuan weights.

fastvideo.configs.pipelines.HYWorldConfig dataclass
HYWorldConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: int = 5, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = HYWorldConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = Hunyuan15VAEConfig(), vae_precision: str = 'fp16', vae_tiling: bool = True, vae_sp: bool = True, image_encoder_config: EncoderConfig = SiglipVisionConfig(), image_encoder_precision: str = 'fp16', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (Qwen2_5_VLConfig(), T5Config()))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp16', 'fp32'))(), preprocess_text_funcs: tuple[Callable[[Any], Any], ...] = (lambda: (qwen_preprocess_text, byt5_preprocess_text))(), postprocess_text_funcs: tuple[Callable[..., Any], ...] = (lambda: (qwen_postprocess_text, byt5_postprocess_text))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, text_encoder_crop_start: int = PROMPT_TEMPLATE_TOKEN_LENGTH, text_encoder_max_lengths: tuple[int, ...] = (lambda: (1000 + PROMPT_TEMPLATE_TOKEN_LENGTH, 256))())

Bases: Hunyuan15T2V480PConfig

Base configuration for HYWorld pipeline architecture.

fastvideo.configs.pipelines.Hunyuan15T2V480PConfig dataclass
Hunyuan15T2V480PConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: int = 5, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = HunyuanVideo15Config(), dit_precision: str = 'bf16', vae_config: VAEConfig = Hunyuan15VAEConfig(), vae_precision: str = 'fp16', vae_tiling: bool = True, vae_sp: bool = True, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (Qwen2_5_VLConfig(), T5Config()))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('bf16', 'fp32'))(), preprocess_text_funcs: tuple[Callable[[Any], Any], ...] = (lambda: (qwen_preprocess_text, byt5_preprocess_text))(), postprocess_text_funcs: tuple[Callable[..., Any], ...] = (lambda: (qwen_postprocess_text, byt5_postprocess_text))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, text_encoder_crop_start: int = PROMPT_TEMPLATE_TOKEN_LENGTH, text_encoder_max_lengths: tuple[int, ...] = (lambda: (1000 + PROMPT_TEMPLATE_TOKEN_LENGTH, 256))())

Bases: PipelineConfig

Base configuration for the HunYuan 1.5 T2V 480P pipeline architecture.

fastvideo.configs.pipelines.Hunyuan15T2V720PConfig dataclass
Hunyuan15T2V720PConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: int = 9, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = HunyuanVideo15Config(), dit_precision: str = 'bf16', vae_config: VAEConfig = Hunyuan15VAEConfig(), vae_precision: str = 'fp16', vae_tiling: bool = True, vae_sp: bool = True, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (Qwen2_5_VLConfig(), T5Config()))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('bf16', 'fp32'))(), preprocess_text_funcs: tuple[Callable[[Any], Any], ...] = (lambda: (qwen_preprocess_text, byt5_preprocess_text))(), postprocess_text_funcs: tuple[Callable[..., Any], ...] = (lambda: (qwen_postprocess_text, byt5_postprocess_text))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, text_encoder_crop_start: int = PROMPT_TEMPLATE_TOKEN_LENGTH, text_encoder_max_lengths: tuple[int, ...] = (lambda: (1000 + PROMPT_TEMPLATE_TOKEN_LENGTH, 256))())

Bases: Hunyuan15T2V480PConfig

Base configuration for the HunYuan 1.5 T2V 720P pipeline architecture.

fastvideo.configs.pipelines.HunyuanConfig dataclass
HunyuanConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: int = 6, flow_shift: int = 7, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = HunyuanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = HunyuanVAEConfig(), vae_precision: str = 'fp16', vae_tiling: bool = True, vae_sp: bool = True, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (LlamaConfig(), CLIPTextConfig()))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp16', 'fp16'))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (llama_preprocess_text, clip_preprocess_text))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], tensor], ...] = (lambda: (llama_postprocess_text, clip_postprocess_text))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None)

Bases: PipelineConfig

Base configuration for HunYuan pipeline architecture.

fastvideo.configs.pipelines.LTX2T2VConfig dataclass
LTX2T2VConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = None, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = LTX2VideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = LTX2VAEConfig(), vae_precision: str = 'bf16', vae_tiling: bool = True, vae_sp: bool = False, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (LTX2GemmaConfig(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('bf16',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (ltx2_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, audio_decoder_config: ModelConfig = LTX2AudioDecoderConfig(), vocoder_config: ModelConfig = LTX2VocoderConfig(), audio_decoder_precision: str = 'bf16', vocoder_precision: str = 'bf16')

Bases: PipelineConfig

Configuration for LTX-2 T2V pipeline.

fastvideo.configs.pipelines.PipelineConfig dataclass
PipelineConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = None, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = DiTConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = VAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = True, vae_sp: bool = True, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (EncoderConfig(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], tensor], ...] = (lambda: (postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None)

Base configuration for all pipeline architectures.

Functions
fastvideo.configs.pipelines.PipelineConfig.from_kwargs classmethod
from_kwargs(kwargs: dict[str, Any], config_cli_prefix: str = '') -> PipelineConfig

Load a PipelineConfig from a kwargs dictionary.

kwargs: dictionary of keyword arguments
config_cli_prefix: prefix of CLI arguments for this PipelineConfig instance

Source code in fastvideo/configs/pipelines/base.py
@classmethod
def from_kwargs(cls,
                kwargs: dict[str, Any],
                config_cli_prefix: str = "") -> "PipelineConfig":
    """
    Load PipelineConfig from kwargs Dictionary.
    kwargs: dictionary of kwargs
    config_cli_prefix: prefix of CLI arguments for this PipelineConfig instance
    """
    from fastvideo.registry import get_pipeline_config_cls_from_name

    prefix_with_dot = f"{config_cli_prefix}." if (config_cli_prefix.strip()
                                                  != "") else ""
    model_path: str | None = kwargs.get(prefix_with_dot + 'model_path',
                                        None) or kwargs.get('model_path')
    pipeline_config_or_path: str | PipelineConfig | dict[
        str, Any] | None = kwargs.get(prefix_with_dot + 'pipeline_config',
                                      None) or kwargs.get('pipeline_config')
    if model_path is None:
        raise ValueError("model_path is required in kwargs")

    # 1. Get the pipeline config class from the registry
    pipeline_config_cls = get_pipeline_config_cls_from_name(model_path)

    # 2. Instantiate PipelineConfig
    if pipeline_config_cls is None:
        logger.warning(
            "Couldn't find pipeline config for %s. Using the default pipeline config.",
            model_path)
        pipeline_config = cls()
    else:
        pipeline_config = pipeline_config_cls()

    # 3. Load PipelineConfig from a json file or a PipelineConfig object if provided
    if isinstance(pipeline_config_or_path, str):
        pipeline_config.load_from_json(pipeline_config_or_path)
        kwargs[prefix_with_dot +
               'pipeline_config_path'] = pipeline_config_or_path
    elif isinstance(pipeline_config_or_path, PipelineConfig):
        pipeline_config = pipeline_config_or_path
    elif isinstance(pipeline_config_or_path, dict):
        pipeline_config.update_pipeline_config(pipeline_config_or_path)

    # 4. Update PipelineConfig from CLI arguments if provided
    kwargs[prefix_with_dot + 'model_path'] = model_path
    pipeline_config.update_config_from_dict(kwargs, config_cli_prefix)
    return pipeline_config
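A minimal usage sketch (the model path and override value below are illustrative, not verified defaults): if the registry does not recognize the path, the base PipelineConfig is used with a warning, as the source above shows.

from fastvideo.configs.pipelines import PipelineConfig

# Hypothetical kwargs, e.g. as collected from a CLI namespace.
kwargs = {
    "model_path": "some-org/some-video-model",     # illustrative path
    "pipeline_config": {"vae_precision": "bf16"},  # dict overrides are merged via update_pipeline_config
}

config = PipelineConfig.from_kwargs(kwargs)
print(type(config).__name__, config.vae_precision)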
fastvideo.configs.pipelines.PipelineConfig.from_pretrained classmethod
from_pretrained(model_path: str) -> PipelineConfig

Use the pipeline class registered for model_path to select the matching pipeline config.

Source code in fastvideo/configs/pipelines/base.py
@classmethod
def from_pretrained(cls, model_path: str) -> "PipelineConfig":
    """
    use the pipeline class setting from model_path to match the pipeline config
    """
    from fastvideo.registry import get_pipeline_config_cls_from_name
    pipeline_config_cls = get_pipeline_config_cls_from_name(model_path)

    return cast(PipelineConfig, pipeline_config_cls(model_path=model_path))
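A short usage sketch, assuming the path is one the registry maps to a concrete config class (the path itself is illustrative):

from fastvideo.configs.pipelines import PipelineConfig

# The returned object is the registered subclass for this model, not necessarily PipelineConfig itself.
config = PipelineConfig.from_pretrained("some-org/some-video-model")
print(type(config).__name__, config.model_path)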
fastvideo.configs.pipelines.SlidingTileAttnConfig dataclass
SlidingTileAttnConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = None, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = DiTConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = VAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = True, vae_sp: bool = True, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (EncoderConfig(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], tensor], ...] = (lambda: (postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, window_size: int = 16, stride: int = 8, height: int = 576, width: int = 1024, pad_to_square: bool = False, use_overlap_optimization: bool = True)

Bases: PipelineConfig

Configuration for sliding tile attention.

fastvideo.configs.pipelines.StepVideoT2VConfig dataclass
StepVideoT2VConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: int = 13, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = StepVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = StepVideoVAEConfig(), vae_precision: str = 'bf16', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (EncoderConfig(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], tensor], ...] = (lambda: (postprocess_text,))(), pos_magic: str = '超高清、HDR 视频、环境光、杜比全景声、画面稳定、流畅动作、逼真的细节、专业级构图、超现实主义、自然、生动、超细节、清晰。', neg_magic: str = '画面暗、低分辨率、不良手、文本、缺少手指、多余的手指、裁剪、低质量、颗粒状、签名、水印、用户名、模糊。', timesteps_scale: bool = False, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, precision: str = 'bf16')

Bases: PipelineConfig

Base configuration for StepVideo pipeline architecture.

fastvideo.configs.pipelines.WanI2V480PConfig dataclass
WanI2V480PConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = 3.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = WanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = WanVAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = CLIPVisionConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (T5Config(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (t5_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, precision: str = 'bf16', warp_denoising_step: bool = True)

Bases: WanT2V480PConfig

Base configuration for Wan I2V 14B 480P pipeline architecture.

fastvideo.configs.pipelines.WanI2V720PConfig dataclass
WanI2V720PConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = 5.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = WanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = WanVAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = CLIPVisionConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (T5Config(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (t5_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, precision: str = 'bf16', warp_denoising_step: bool = True)

Bases: WanI2V480PConfig

Base configuration for Wan I2V 14B 720P pipeline architecture.

fastvideo.configs.pipelines.WanT2V480PConfig dataclass
WanT2V480PConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = 3.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = WanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = WanVAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (T5Config(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (t5_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, precision: str = 'bf16', warp_denoising_step: bool = True)

Bases: PipelineConfig

Base configuration for Wan T2V 1.3B pipeline architecture.

fastvideo.configs.pipelines.WanT2V720PConfig dataclass
WanT2V720PConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = 5.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = WanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = WanVAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (T5Config(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (t5_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, precision: str = 'bf16', warp_denoising_step: bool = True)

Bases: WanT2V480PConfig

Base configuration for Wan T2V 14B 720P pipeline architecture.

Modules

fastvideo.configs.pipelines.base
Classes
fastvideo.configs.pipelines.base.PipelineConfig dataclass
PipelineConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = None, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = DiTConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = VAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = True, vae_sp: bool = True, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (EncoderConfig(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], tensor], ...] = (lambda: (postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None)

Base configuration for all pipeline architectures.

Functions
fastvideo.configs.pipelines.base.PipelineConfig.from_kwargs classmethod
from_kwargs(kwargs: dict[str, Any], config_cli_prefix: str = '') -> PipelineConfig

Load a PipelineConfig from a kwargs dictionary.

kwargs: dictionary of keyword arguments
config_cli_prefix: prefix of CLI arguments for this PipelineConfig instance

Source code in fastvideo/configs/pipelines/base.py
@classmethod
def from_kwargs(cls,
                kwargs: dict[str, Any],
                config_cli_prefix: str = "") -> "PipelineConfig":
    """
    Load PipelineConfig from kwargs Dictionary.
    kwargs: dictionary of kwargs
    config_cli_prefix: prefix of CLI arguments for this PipelineConfig instance
    """
    from fastvideo.registry import get_pipeline_config_cls_from_name

    prefix_with_dot = f"{config_cli_prefix}." if (config_cli_prefix.strip()
                                                  != "") else ""
    model_path: str | None = kwargs.get(prefix_with_dot + 'model_path',
                                        None) or kwargs.get('model_path')
    pipeline_config_or_path: str | PipelineConfig | dict[
        str, Any] | None = kwargs.get(prefix_with_dot + 'pipeline_config',
                                      None) or kwargs.get('pipeline_config')
    if model_path is None:
        raise ValueError("model_path is required in kwargs")

    # 1. Get the pipeline config class from the registry
    pipeline_config_cls = get_pipeline_config_cls_from_name(model_path)

    # 2. Instantiate PipelineConfig
    if pipeline_config_cls is None:
        logger.warning(
            "Couldn't find pipeline config for %s. Using the default pipeline config.",
            model_path)
        pipeline_config = cls()
    else:
        pipeline_config = pipeline_config_cls()

    # 3. Load PipelineConfig from a json file or a PipelineConfig object if provided
    if isinstance(pipeline_config_or_path, str):
        pipeline_config.load_from_json(pipeline_config_or_path)
        kwargs[prefix_with_dot +
               'pipeline_config_path'] = pipeline_config_or_path
    elif isinstance(pipeline_config_or_path, PipelineConfig):
        pipeline_config = pipeline_config_or_path
    elif isinstance(pipeline_config_or_path, dict):
        pipeline_config.update_pipeline_config(pipeline_config_or_path)

    # 4. Update PipelineConfig from CLI arguments if provided
    kwargs[prefix_with_dot + 'model_path'] = model_path
    pipeline_config.update_config_from_dict(kwargs, config_cli_prefix)
    return pipeline_config
fastvideo.configs.pipelines.base.PipelineConfig.from_pretrained classmethod
from_pretrained(model_path: str) -> PipelineConfig

Use the pipeline class registered for model_path to select the matching pipeline config.

Source code in fastvideo/configs/pipelines/base.py
@classmethod
def from_pretrained(cls, model_path: str) -> "PipelineConfig":
    """
    use the pipeline class setting from model_path to match the pipeline config
    """
    from fastvideo.registry import get_pipeline_config_cls_from_name
    pipeline_config_cls = get_pipeline_config_cls_from_name(model_path)

    return cast(PipelineConfig, pipeline_config_cls(model_path=model_path))
fastvideo.configs.pipelines.base.STA_Mode

Bases: str, Enum

STA (Sliding Tile Attention) modes.

fastvideo.configs.pipelines.base.SlidingTileAttnConfig dataclass
SlidingTileAttnConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = None, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = DiTConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = VAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = True, vae_sp: bool = True, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (EncoderConfig(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], tensor], ...] = (lambda: (postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, window_size: int = 16, stride: int = 8, height: int = 576, width: int = 1024, pad_to_square: bool = False, use_overlap_optimization: bool = True)

Bases: PipelineConfig

Configuration for sliding tile attention.

Functions
fastvideo.configs.pipelines.base.parse_int_list
parse_int_list(value: str) -> list[int]

Parse a comma-separated string of integers into a list.

Source code in fastvideo/configs/pipelines/base.py
def parse_int_list(value: str) -> list[int]:
    """Parse a comma-separated string of integers into a list."""
    if not value:
        return []
    return [int(x.strip()) for x in value.split(",")]
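For example, parsing a comma-separated step list such as the one used for dmd_denoising_steps:

parse_int_list("1000, 757, 522")  # -> [1000, 757, 522]
parse_int_list("")                # -> []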
fastvideo.configs.pipelines.cosmos
Classes
fastvideo.configs.pipelines.cosmos.CosmosConfig dataclass
CosmosConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: int = 6, flow_shift: float = 1.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = CosmosVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = CosmosVAEConfig(), vae_precision: str = 'fp16', vae_tiling: bool = True, vae_sp: bool = True, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (T5LargeConfig(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('bf16',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (t5_large_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, conditioning_strategy: str = 'frame_replace', min_num_conditional_frames: int = 1, max_num_conditional_frames: int = 2, sigma_conditional: float = 0.0001, sigma_data: float = 1.0, state_ch: int = 16, state_t: int = 24, text_encoder_class: str = 'T5')

Bases: PipelineConfig

Configuration for Cosmos2 Video2World pipeline matching diffusers.

Functions
fastvideo.configs.pipelines.cosmos.t5_large_postprocess_text
t5_large_postprocess_text(outputs: BaseEncoderOutput) -> Tensor

Postprocess T5 Large text encoder outputs for Cosmos pipeline.

Return raw last_hidden_state without truncation/padding.

Source code in fastvideo/configs/pipelines/cosmos.py
def t5_large_postprocess_text(outputs: BaseEncoderOutput) -> torch.Tensor:
    """Postprocess T5 Large text encoder outputs for Cosmos pipeline.

    Return raw last_hidden_state without truncation/padding.
    """
    hidden_state = outputs.last_hidden_state

    if hidden_state is None:
        raise ValueError("T5 Large outputs missing last_hidden_state")

    nan_count = torch.isnan(hidden_state).sum()
    if nan_count > 0:
        hidden_state = hidden_state.masked_fill(torch.isnan(hidden_state), 0.0)

    # Zero out embeddings beyond actual sequence length
    if outputs.attention_mask is not None:
        attention_mask = outputs.attention_mask
        lengths = attention_mask.sum(dim=1).cpu()
        for i, length in enumerate(lengths):
            hidden_state[i, length:] = 0

    return hidden_state
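A minimal illustration of the masking step with plain tensors (BaseEncoderOutput itself is not constructed here); positions past each sequence's length are zeroed exactly as in the loop above.

import torch

# Toy stand-ins for last_hidden_state (batch=2, seq=4, dim=3) and attention_mask.
hidden_state = torch.randn(2, 4, 3)
attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])

lengths = attention_mask.sum(dim=1)
for i, length in enumerate(lengths):
    hidden_state[i, length:] = 0  # zero out embeddings beyond the actual sequence length

assert hidden_state[0, 3:].abs().sum() == 0
assert hidden_state[1, 2:].abs().sum() == 0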
fastvideo.configs.pipelines.cosmos2_5
Classes
fastvideo.configs.pipelines.cosmos2_5.Cosmos25Config dataclass
Cosmos25Config(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 0.0, flow_shift: float = 5.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = (lambda: Cosmos25VideoConfig(arch_config=Cosmos25ArchConfig(num_attention_heads=16, attention_head_dim=128, in_channels=16, out_channels=16, num_layers=28, patch_size=[1, 2, 2], max_size=[128, 240, 240], rope_scale=[1.0, 3.0, 3.0], text_embed_dim=1024, mlp_ratio=4.0, adaln_lora_dim=256, use_adaln_lora=True, concat_padding_mask=True, extra_pos_embed_type=None, use_crossattn_projection=True, rope_enable_fps_modulation=False, qk_norm='rms_norm')))(), dit_precision: str = 'bf16', vae_config: VAEConfig = Cosmos25VAEConfig(), vae_precision: str = 'bf16', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (Reason1Config(arch_config=Reason1ArchConfig(embedding_concat_strategy='full_concat')),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('bf16',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (_identity_preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (reason1_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = NONE, skip_time_steps: int = 0, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None)

Bases: PipelineConfig

Configuration for Cosmos 2.5 (Predict2.5) video generation pipeline.

fastvideo.configs.pipelines.hunyuan
Classes
fastvideo.configs.pipelines.hunyuan.FastHunyuanConfig dataclass
FastHunyuanConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: int = 6, flow_shift: int = 17, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = HunyuanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = HunyuanVAEConfig(), vae_precision: str = 'fp16', vae_tiling: bool = True, vae_sp: bool = True, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (LlamaConfig(), CLIPTextConfig()))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp16', 'fp16'))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (llama_preprocess_text, clip_preprocess_text))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], tensor], ...] = (lambda: (llama_postprocess_text, clip_postprocess_text))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None)

Bases: HunyuanConfig

Configuration specifically optimized for FastHunyuan weights.

fastvideo.configs.pipelines.hunyuan.HunyuanConfig dataclass
HunyuanConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: int = 6, flow_shift: int = 7, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = HunyuanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = HunyuanVAEConfig(), vae_precision: str = 'fp16', vae_tiling: bool = True, vae_sp: bool = True, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (LlamaConfig(), CLIPTextConfig()))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp16', 'fp16'))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (llama_preprocess_text, clip_preprocess_text))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], tensor], ...] = (lambda: (llama_postprocess_text, clip_postprocess_text))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None)

Bases: PipelineConfig

Base configuration for HunYuan pipeline architecture.

fastvideo.configs.pipelines.hunyuan15
Classes
fastvideo.configs.pipelines.hunyuan15.Hunyuan15T2V480PConfig dataclass
Hunyuan15T2V480PConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: int = 5, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = HunyuanVideo15Config(), dit_precision: str = 'bf16', vae_config: VAEConfig = Hunyuan15VAEConfig(), vae_precision: str = 'fp16', vae_tiling: bool = True, vae_sp: bool = True, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (Qwen2_5_VLConfig(), T5Config()))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('bf16', 'fp32'))(), preprocess_text_funcs: tuple[Callable[[Any], Any], ...] = (lambda: (qwen_preprocess_text, byt5_preprocess_text))(), postprocess_text_funcs: tuple[Callable[..., Any], ...] = (lambda: (qwen_postprocess_text, byt5_postprocess_text))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, text_encoder_crop_start: int = PROMPT_TEMPLATE_TOKEN_LENGTH, text_encoder_max_lengths: tuple[int, ...] = (lambda: (1000 + PROMPT_TEMPLATE_TOKEN_LENGTH, 256))())

Bases: PipelineConfig

Base configuration for the HunYuan 1.5 T2V 480P pipeline architecture.

fastvideo.configs.pipelines.hunyuan15.Hunyuan15T2V720PConfig dataclass
Hunyuan15T2V720PConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: int = 9, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = HunyuanVideo15Config(), dit_precision: str = 'bf16', vae_config: VAEConfig = Hunyuan15VAEConfig(), vae_precision: str = 'fp16', vae_tiling: bool = True, vae_sp: bool = True, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (Qwen2_5_VLConfig(), T5Config()))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('bf16', 'fp32'))(), preprocess_text_funcs: tuple[Callable[[Any], Any], ...] = (lambda: (qwen_preprocess_text, byt5_preprocess_text))(), postprocess_text_funcs: tuple[Callable[..., Any], ...] = (lambda: (qwen_postprocess_text, byt5_postprocess_text))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, text_encoder_crop_start: int = PROMPT_TEMPLATE_TOKEN_LENGTH, text_encoder_max_lengths: tuple[int, ...] = (lambda: (1000 + PROMPT_TEMPLATE_TOKEN_LENGTH, 256))())

Bases: Hunyuan15T2V480PConfig

Base configuration for the HunYuan 1.5 T2V 720P pipeline architecture.

Functions
fastvideo.configs.pipelines.hunyuan15.extract_glyph_texts
extract_glyph_texts(prompt: str) -> str | None

Extract glyph texts from prompt using regex pattern.

Parameters:

Name Type Description Default
prompt str

Input prompt string

required

Returns:

Type Description
str | None

Formatted string of the extracted glyph texts (one 'Text "..."' clause per match), or None if the prompt contains no quoted text

Source code in fastvideo/configs/pipelines/hunyuan15.py
def extract_glyph_texts(prompt: str) -> str | None:
    """
    Extract glyph texts from prompt using regex pattern.

    Args:
        prompt: Input prompt string

    Returns:
        Formatted string of extracted glyph texts, or None if no quoted text is found
    """
    pattern = r"\"(.*?)\"|“(.*?)”"
    matches = re.findall(pattern, prompt)
    result = [match[0] or match[1] for match in matches]
    result = list(dict.fromkeys(result)) if len(result) > 1 else result

    if result:
        formatted_result = ". ".join([f'Text "{text}"'
                                      for text in result]) + ". "
    else:
        formatted_result = None

    return formatted_result
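For example, on a prompt containing quoted glyph text (both ASCII and full-width quotes are matched by the pattern):

extract_glyph_texts('A neon sign reading "OPEN" above a door marked "EXIT"')
# -> 'Text "OPEN". Text "EXIT". '
extract_glyph_texts("A quiet street with no signage")
# -> None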
fastvideo.configs.pipelines.hunyuan15.format_text_input
format_text_input(prompt: str, system_message: str) -> list[dict[str, Any]]

Apply text to template.

Parameters:

Name Type Description Default
prompt str

Input text.

required
system_message str

System message.

required

Returns:

Type Description
list[dict[str, Any]]

List[Dict[str, Any]]: List of chat conversation.

Source code in fastvideo/configs/pipelines/hunyuan15.py
def format_text_input(prompt: str, system_message: str) -> list[dict[str, Any]]:
    """
    Apply text to template.

    Args:
        prompt (str): Input text.
        system_message (str): System message.

    Returns:
        List[Dict[str, Any]]: List of chat conversation.
    """

    template = [{
        "role": "system",
        "content": system_message
    }, {
        "role": "user",
        "content": prompt if prompt else " "
    }]

    return template
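A quick usage example (the system message here is illustrative, not a pipeline default):

format_text_input(
    "A cat playing piano in the rain",
    system_message="Describe the video to generate.",
)
# -> [{'role': 'system', 'content': 'Describe the video to generate.'},
#     {'role': 'user', 'content': 'A cat playing piano in the rain'}]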
fastvideo.configs.pipelines.hyworld
Classes
fastvideo.configs.pipelines.hyworld.HYWorldConfig dataclass
HYWorldConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: int = 5, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = HYWorldConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = Hunyuan15VAEConfig(), vae_precision: str = 'fp16', vae_tiling: bool = True, vae_sp: bool = True, image_encoder_config: EncoderConfig = SiglipVisionConfig(), image_encoder_precision: str = 'fp16', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (Qwen2_5_VLConfig(), T5Config()))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp16', 'fp32'))(), preprocess_text_funcs: tuple[Callable[[Any], Any], ...] = (lambda: (qwen_preprocess_text, byt5_preprocess_text))(), postprocess_text_funcs: tuple[Callable[..., Any], ...] = (lambda: (qwen_postprocess_text, byt5_postprocess_text))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, text_encoder_crop_start: int = PROMPT_TEMPLATE_TOKEN_LENGTH, text_encoder_max_lengths: tuple[int, ...] = (lambda: (1000 + PROMPT_TEMPLATE_TOKEN_LENGTH, 256))())

Bases: Hunyuan15T2V480PConfig

Base configuration for HYWorld pipeline architecture.

fastvideo.configs.pipelines.longcat
Classes
fastvideo.configs.pipelines.longcat.LongCatDiTArchConfig dataclass
LongCatDiTArchConfig(stacked_params_mapping: list[tuple[str, str, str]] = list(), _fsdp_shard_conditions: list = list(), _compile_conditions: list = list(), param_names_mapping: dict = dict(), reverse_param_names_mapping: dict = dict(), lora_param_names_mapping: dict = dict(), _supported_attention_backends: tuple[AttentionBackendEnum, ...] = (SLIDING_TILE_ATTN, SAGE_ATTN, FLASH_ATTN, TORCH_SDPA, VIDEO_SPARSE_ATTN, VMOBA_ATTN, SAGE_ATTN_THREE, SLA_ATTN, SAGE_SLA_ATTN), hidden_size: int = 0, num_attention_heads: int = 0, num_channels_latents: int = 0, in_channels: int = 16, out_channels: int = 16, exclude_lora_layers: list[str] = list(), boundary_ratio: float | None = None, adaln_tembed_dim: int = 512, caption_channels: int = 4096, depth: int = 48, enable_bsa: bool = False, enable_flashattn3: bool = False, enable_flashattn2: bool = True, enable_xformers: bool = False, frequency_embedding_size: int = 256, mlp_ratio: int = 4, num_heads: int = 32, text_tokens_zero_pad: bool = True, patch_size: list[int] = (lambda: [1, 2, 2])(), cp_split_hw: list[int] | None = None, bsa_params: dict | None = None)

Bases: DiTArchConfig

Extended DiTArchConfig with LongCat-specific fields.

fastvideo.configs.pipelines.longcat.LongCatT2V480PConfig dataclass
LongCatT2V480PConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = None, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = (lambda: DiTConfig(arch_config=LongCatDiTArchConfig()))(), dit_precision: str = 'bf16', vae_config: VAEConfig = WanVAEConfig(), vae_precision: str = 'bf16', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[T5Config, ...] = (lambda: (T5Config(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('bf16',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (longcat_preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (umt5_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, enable_kv_cache: bool = True, offload_kv_cache: bool = False, enable_bsa: bool = False, use_distill: bool = False, enhance_hf: bool = False, bsa_params: dict | None = None, bsa_sparsity: float | None = None, bsa_cdf_threshold: float | None = None, bsa_chunk_q: list[int] | None = None, bsa_chunk_k: list[int] | None = None, t_thresh: float | None = None)

Bases: PipelineConfig

Configuration for LongCat pipeline (480p).

Components expected by loaders
  • tokenizer: AutoTokenizer
  • text_encoder: UMT5EncoderModel
  • transformer: LongCatTransformer3DModel
  • vae: AutoencoderKLWan (Wan VAE, 4x8 compression)
  • scheduler: FlowMatchEulerDiscreteScheduler
fastvideo.configs.pipelines.longcat.LongCatT2V704PConfig dataclass
LongCatT2V704PConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = None, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = (lambda: DiTConfig(arch_config=LongCatDiTArchConfig()))(), dit_precision: str = 'bf16', vae_config: VAEConfig = WanVAEConfig(), vae_precision: str = 'bf16', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[T5Config, ...] = (lambda: (T5Config(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('bf16',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (longcat_preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (umt5_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, enable_kv_cache: bool = True, offload_kv_cache: bool = False, enable_bsa: bool = True, use_distill: bool = False, enhance_hf: bool = False, bsa_params: dict | None = None, bsa_sparsity: float | None = None, bsa_cdf_threshold: float | None = None, bsa_chunk_q: list[int] | None = None, bsa_chunk_k: list[int] | None = None, t_thresh: float | None = None)

Bases: LongCatT2V480PConfig

Configuration for LongCat pipeline (704p) with BSA enabled by default.

Uses the same resolution and BSA parameters as original LongCat refinement stage. BSA parameters configured in transformer config.json with chunk_3d_shape=[4,4,4]:
  • Input: 704×1280×96
  • VAE (8x): 88×160×96
  • Patch [1,2,2]: 44×80×96
  • chunk [4,4,4]: 96%4=0, 44%4=0, 80%4=0 ✅

This configuration matches the original LongCat refinement stage parameters.
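The divisibility claim can be checked directly; the snippet below just restates the docstring's arithmetic for the 704×1280×96 input (spatial 8x VAE downsampling, then a [1, 2, 2] patch, then [4, 4, 4] chunks).

# Latent token counts per axis after VAE (8x spatial) and patch_size [1, 2, 2].
h, w, t = 704 // 8 // 2, 1280 // 8 // 2, 96   # -> 44, 80, 96
chunk = 4                                      # chunk_3d_shape = [4, 4, 4]
assert t % chunk == 0 and h % chunk == 0 and w % chunk == 0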

Functions
fastvideo.configs.pipelines.longcat.longcat_preprocess_text
longcat_preprocess_text(prompt: str) -> str

Clean and preprocess text like original LongCat implementation.

This function applies the same text cleaning pipeline as the original LongCat-Video implementation to ensure identical tokenization results.

Steps:
1. basic_clean: Fix unicode issues and unescape HTML entities
2. whitespace_clean: Normalize whitespace to single spaces

Parameters:

Name Type Description Default
prompt str

Raw input text prompt

required

Returns:

Type Description
str

Cleaned and normalized text prompt

Source code in fastvideo/configs/pipelines/longcat.py
def longcat_preprocess_text(prompt: str) -> str:
    """Clean and preprocess text like original LongCat implementation.

    This function applies the same text cleaning pipeline as the original
    LongCat-Video implementation to ensure identical tokenization results.

    Steps:
    1. basic_clean: Fix unicode issues and unescape HTML entities
    2. whitespace_clean: Normalize whitespace to single spaces

    Args:
        prompt: Raw input text prompt

    Returns:
        Cleaned and normalized text prompt
    """
    # basic_clean: fix unicode and HTML entities
    text = ftfy.fix_text(prompt)
    text = html.unescape(html.unescape(text))
    text = text.strip()

    # whitespace_clean: normalize whitespace
    text = re.sub(r"\s+", " ", text)
    text = text.strip()

    return text
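For example (the import path follows the module path shown above):

from fastvideo.configs.pipelines.longcat import longcat_preprocess_text

longcat_preprocess_text("A  cafe&amp;bar at   night\n with neon signs ")
# -> 'A cafe&bar at night with neon signs'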
fastvideo.configs.pipelines.longcat.umt5_postprocess_text
umt5_postprocess_text(outputs: BaseEncoderOutput) -> Tensor

Postprocess UMT5/T5 encoder outputs to fixed length 512 embeddings.

Source code in fastvideo/configs/pipelines/longcat.py
def umt5_postprocess_text(outputs: BaseEncoderOutput) -> torch.Tensor:
    """
    Postprocess UMT5/T5 encoder outputs to fixed length 512 embeddings.
    """
    mask: torch.Tensor = outputs.attention_mask
    hidden_state: torch.Tensor = outputs.last_hidden_state
    seq_lens = mask.gt(0).sum(dim=1).long()
    assert torch.isnan(hidden_state).sum() == 0
    prompt_embeds = [u[:v] for u, v in zip(hidden_state, seq_lens, strict=True)]
    prompt_embeds_tensor: torch.Tensor = torch.stack([
        torch.cat([u, u.new_zeros(512 - u.size(0), u.size(1))])
        for u in prompt_embeds
    ],
                                                     dim=0)
    return prompt_embeds_tensor
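A minimal sketch of the fixed-length padding step with plain tensors (BaseEncoderOutput itself is not constructed here):

import torch

# Two toy sequences of lengths 3 and 5, hidden size 8.
hidden_state = torch.randn(2, 5, 8)
mask = torch.tensor([[1, 1, 1, 0, 0],
                     [1, 1, 1, 1, 1]])

seq_lens = mask.gt(0).sum(dim=1).long()
prompt_embeds = [u[:v] for u, v in zip(hidden_state, seq_lens, strict=True)]
# Pad each trimmed sequence with zeros up to the fixed length of 512.
padded = torch.stack(
    [torch.cat([u, u.new_zeros(512 - u.size(0), u.size(1))]) for u in prompt_embeds])
print(padded.shape)  # torch.Size([2, 512, 8])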
fastvideo.configs.pipelines.ltx2
Classes
fastvideo.configs.pipelines.ltx2.LTX2T2VConfig dataclass
LTX2T2VConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = None, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = LTX2VideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = LTX2VAEConfig(), vae_precision: str = 'bf16', vae_tiling: bool = True, vae_sp: bool = False, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (LTX2GemmaConfig(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('bf16',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (ltx2_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, audio_decoder_config: ModelConfig = LTX2AudioDecoderConfig(), vocoder_config: ModelConfig = LTX2VocoderConfig(), audio_decoder_precision: str = 'bf16', vocoder_precision: str = 'bf16')

Bases: PipelineConfig

Configuration for LTX-2 T2V pipeline.

fastvideo.configs.pipelines.stepvideo
Classes
fastvideo.configs.pipelines.stepvideo.StepVideoT2VConfig dataclass
StepVideoT2VConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: int = 13, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = StepVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = StepVideoVAEConfig(), vae_precision: str = 'bf16', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (EncoderConfig(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], tensor], ...] = (lambda: (postprocess_text,))(), pos_magic: str = '超高清、HDR 视频、环境光、杜比全景声、画面稳定、流畅动作、逼真的细节、专业级构图、超现实主义、自然、生动、超细节、清晰。', neg_magic: str = '画面暗、低分辨率、不良手、文本、缺少手指、多余的手指、裁剪、低质量、颗粒状、签名、水印、用户名、模糊。', timesteps_scale: bool = False, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, precision: str = 'bf16')

Bases: PipelineConfig

Base configuration for StepVideo pipeline architecture.

fastvideo.configs.pipelines.turbodiffusion

TurboDiffusion pipeline configurations.

TurboDiffusion uses the RCM (recurrent Consistency Model) scheduler with SLA (Sparse-Linear Attention) for fast 1-4 step video generation.

Classes
fastvideo.configs.pipelines.turbodiffusion.TurboDiffusionI2VConfig dataclass
TurboDiffusionI2VConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = 5.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = WanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = WanVAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = CLIPVisionConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (T5Config(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (t5_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = 0.9, precision: str = 'bf16', warp_denoising_step: bool = True)

Bases: PipelineConfig

Base configuration for TurboDiffusion I2V pipeline.

Uses RCM scheduler with sigma_max=200 for 1-4 step generation. Uses boundary_ratio=0.9 for high-noise to low-noise model switching.

fastvideo.configs.pipelines.turbodiffusion.TurboDiffusionI2V_A14B_Config dataclass
TurboDiffusionI2V_A14B_Config(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = 5.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = WanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = WanVAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = CLIPVisionConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (T5Config(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (t5_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = 0.9, precision: str = 'bf16', warp_denoising_step: bool = True)

Bases: TurboDiffusionI2VConfig

Configuration for TurboDiffusion I2V A14B model.

fastvideo.configs.pipelines.turbodiffusion.TurboDiffusionT2VConfig dataclass
TurboDiffusionT2VConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = 3.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = WanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = WanVAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (T5Config(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (t5_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, precision: str = 'bf16', warp_denoising_step: bool = True)

Bases: PipelineConfig

Base configuration for TurboDiffusion T2V pipeline.

Uses RCM scheduler with sigma_max=80 for 1-4 step generation. No boundary_ratio (single model, no switching).

fastvideo.configs.pipelines.turbodiffusion.TurboDiffusionT2V_14B_Config dataclass
TurboDiffusionT2V_14B_Config(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = 5.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = WanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = WanVAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (T5Config(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (t5_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, precision: str = 'bf16', warp_denoising_step: bool = True)

Bases: TurboDiffusionT2VConfig

Configuration for TurboDiffusion T2V 14B model.

Uses the same config as the 1.3B model but with a higher flow_shift for the 14B model.

fastvideo.configs.pipelines.turbodiffusion.TurboDiffusionT2V_1_3B_Config dataclass
TurboDiffusionT2V_1_3B_Config(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = 3.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = WanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = WanVAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (T5Config(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (t5_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, precision: str = 'bf16', warp_denoising_step: bool = True)

Bases: TurboDiffusionT2VConfig

Configuration for TurboDiffusion T2V 1.3B model.

fastvideo.configs.pipelines.wan
Classes
fastvideo.configs.pipelines.wan.FastWan2_1_T2V_480P_Config dataclass
FastWan2_1_T2V_480P_Config(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = 8.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = WanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = WanVAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (T5Config(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (t5_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = (lambda: [1000, 757, 522])(), ti2v_task: bool = False, boundary_ratio: float | None = None, precision: str = 'bf16', warp_denoising_step: bool = True)

Bases: WanT2V480PConfig

Base configuration for FastWan T2V 1.3B 480P pipeline architecture with DMD.

fastvideo.configs.pipelines.wan.WANV2VConfig dataclass
WANV2VConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = 3.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = WanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = WanVAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = WAN2_1ControlCLIPVisionConfig(), image_encoder_precision: str = 'bf16', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (T5Config(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (t5_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, precision: str = 'bf16', warp_denoising_step: bool = True)

Bases: WanI2V480PConfig

Configuration for WAN2.1 1.3B Control pipeline.

fastvideo.configs.pipelines.wan.WanI2V480PConfig dataclass
WanI2V480PConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = 3.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = WanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = WanVAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = CLIPVisionConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (T5Config(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (t5_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, precision: str = 'bf16', warp_denoising_step: bool = True)

Bases: WanT2V480PConfig

Base configuration for Wan I2V 14B 480P pipeline architecture.

fastvideo.configs.pipelines.wan.WanI2V720PConfig dataclass
WanI2V720PConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = 5.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = WanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = WanVAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = CLIPVisionConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (T5Config(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (t5_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, precision: str = 'bf16', warp_denoising_step: bool = True)

Bases: WanI2V480PConfig

Base configuration for Wan I2V 14B 720P pipeline architecture.

fastvideo.configs.pipelines.wan.WanT2V480PConfig dataclass
WanT2V480PConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = 3.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = WanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = WanVAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (T5Config(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (t5_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, precision: str = 'bf16', warp_denoising_step: bool = True)

Bases: PipelineConfig

Base configuration for Wan T2V 1.3B pipeline architecture.

fastvideo.configs.pipelines.wan.WanT2V720PConfig dataclass
WanT2V720PConfig(model_path: str = '', pipeline_config_path: str | None = None, embedded_cfg_scale: float = 6.0, flow_shift: float | None = 5.0, disable_autocast: bool = False, is_causal: bool = False, dit_config: DiTConfig = WanVideoConfig(), dit_precision: str = 'bf16', vae_config: VAEConfig = WanVAEConfig(), vae_precision: str = 'fp32', vae_tiling: bool = False, vae_sp: bool = False, image_encoder_config: EncoderConfig = EncoderConfig(), image_encoder_precision: str = 'fp32', text_encoder_configs: tuple[EncoderConfig, ...] = (lambda: (T5Config(),))(), text_encoder_precisions: tuple[str, ...] = (lambda: ('fp32',))(), preprocess_text_funcs: tuple[Callable[[str], str], ...] = (lambda: (preprocess_text,))(), postprocess_text_funcs: tuple[Callable[[BaseEncoderOutput], Tensor], ...] = (lambda: (t5_postprocess_text,))(), pos_magic: str | None = None, neg_magic: str | None = None, timesteps_scale: bool | None = None, mask_strategy_file_path: str | None = None, STA_mode: STA_Mode = STA_INFERENCE, skip_time_steps: int = 15, dmd_denoising_steps: list[int] | None = None, ti2v_task: bool = False, boundary_ratio: float | None = None, precision: str = 'bf16', warp_denoising_step: bool = True)

Bases: WanT2V480PConfig

Base configuration for Wan T2V 14B 720P pipeline architecture.
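
The Wan pipeline configs mostly differ in their defaults: flow_shift is 3.0 for the 480P variants and 5.0 for 720P, and the I2V variants swap in a CLIP vision encoder for the reference image. A minimal sketch, assuming the import path shown in the module heading:

from fastvideo.configs.pipelines.wan import (
    WanT2V480PConfig,
    WanT2V720PConfig,
    WanI2V480PConfig,
)

print(WanT2V480PConfig().flow_shift)   # 3.0
print(WanT2V720PConfig().flow_shift)   # 5.0
# I2V variants default to a CLIP vision image encoder (per the signatures above).
print(type(WanI2V480PConfig().image_encoder_config).__name__)   # 'CLIPVisionConfig'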

fastvideo.configs.sample

Classes

fastvideo.configs.sample.SamplingParam dataclass
SamplingParam(data_type: str = 'video', image_path: str | None = None, pil_image: Any | None = None, video_path: str | None = None, mouse_cond: Any | None = None, keyboard_cond: Any | None = None, grid_sizes: Any | None = None, pose: str | None = None, refine_from: str | None = None, t_thresh: float = 0.5, spatial_refine_only: bool = False, num_cond_frames: int = 0, stage1_video: Any | None = None, prompt: str | list[str] | None = None, negative_prompt: str = 'Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards', prompt_path: str | None = None, output_path: str = 'outputs/', output_video_name: str | None = None, num_videos_per_prompt: int = 1, seed: int = 1024, num_frames: int = 125, num_frames_round_down: bool = False, height: int = 720, width: int = 1280, fps: int = 24, num_inference_steps: int = 50, guidance_scale: float = 1.0, guidance_rescale: float = 0.0, boundary_ratio: float | None = None, sigmas: list[float] | None = None, enable_teacache: bool = False, save_video: bool = True, return_frames: bool = False, return_trajectory_latents: bool = False, return_trajectory_decoded: bool = False)

Sampling parameters for video generation.

Functions
fastvideo.configs.sample.SamplingParam.add_cli_args staticmethod
add_cli_args(parser: Any) -> Any

Add CLI arguments for SamplingParam fields

Source code in fastvideo/configs/sample/base.py
@staticmethod
def add_cli_args(parser: Any) -> Any:
    """Add CLI arguments for SamplingParam fields"""
    parser.add_argument(
        "--prompt",
        type=str,
        default=SamplingParam.prompt,
        help="Text prompt for video generation",
    )
    parser.add_argument(
        "--negative-prompt",
        type=str,
        default=SamplingParam.negative_prompt,
        help="Negative text prompt for video generation",
    )
    parser.add_argument(
        "--prompt-path",
        type=str,
        default=SamplingParam.prompt_path,
        help="Path to a text file containing the prompt",
    )
    parser.add_argument(
        "--output-path",
        type=str,
        default=SamplingParam.output_path,
        help="Path to save the generated video",
    )
    parser.add_argument(
        "--output-video-name",
        type=str,
        default=SamplingParam.output_video_name,
        help="Name of the output video",
    )
    parser.add_argument(
        "--num-videos-per-prompt",
        type=int,
        default=SamplingParam.num_videos_per_prompt,
        help="Number of videos to generate per prompt",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=SamplingParam.seed,
        help="Random seed for generation",
    )
    parser.add_argument(
        "--num-frames",
        type=int,
        default=SamplingParam.num_frames,
        help="Number of frames to generate",
    )
    parser.add_argument(
        "--height",
        type=int,
        default=SamplingParam.height,
        help="Height of generated video",
    )
    parser.add_argument(
        "--width",
        type=int,
        default=SamplingParam.width,
        help="Width of generated video",
    )
    parser.add_argument(
        "--fps",
        type=int,
        default=SamplingParam.fps,
        help="Frames per second for saved video",
    )
    parser.add_argument(
        "--num-inference-steps",
        type=int,
        default=SamplingParam.num_inference_steps,
        help="Number of denoising steps",
    )
    parser.add_argument(
        "--guidance-scale",
        type=float,
        default=SamplingParam.guidance_scale,
        help="Classifier-free guidance scale",
    )
    parser.add_argument(
        "--guidance-rescale",
        type=float,
        default=SamplingParam.guidance_rescale,
        help="Guidance rescale factor",
    )
    parser.add_argument(
        "--boundary-ratio",
        type=float,
        default=SamplingParam.boundary_ratio,
        help="Boundary timestep ratio",
    )
    parser.add_argument(
        "--save-video",
        action="store_true",
        default=SamplingParam.save_video,
        help="Whether to save the video to disk",
    )
    parser.add_argument(
        "--no-save-video",
        action="store_false",
        dest="save_video",
        help="Don't save the video to disk",
    )
    parser.add_argument(
        "--return-frames",
        action="store_true",
        default=SamplingParam.return_frames,
        help="Whether to return the raw frames",
    )
    parser.add_argument(
        "--image-path",
        type=str,
        default=SamplingParam.image_path,
        help="Path to input image for image-to-video generation",
    )
    parser.add_argument(
        "--video-path",
        type=str,
        default=SamplingParam.video_path,
        help="Path to input video for video-to-video generation",
    )
    parser.add_argument(
        "--refine-from",
        type=str,
        default=SamplingParam.refine_from,
        help="Path to stage1 video for refinement (LongCat 480p->720p)",
    )
    parser.add_argument(
        "--t-thresh",
        type=float,
        default=SamplingParam.t_thresh,
        help=
        "Threshold for timestep scheduling in refinement (default: 0.5)",
    )
    parser.add_argument(
        "--spatial-refine-only",
        action=StoreBoolean,
        default=SamplingParam.spatial_refine_only,
        help="Only perform spatial super-resolution (no temporal doubling)",
    )
    parser.add_argument(
        "--num-cond-frames",
        type=int,
        default=SamplingParam.num_cond_frames,
        help="Number of conditioning frames for refinement",
    )
    parser.add_argument(
        "--moba-config-path",
        type=str,
        default=None,
        help=
        "Path to a JSON file containing V-MoBA specific configurations.",
    )
    parser.add_argument(
        "--return-trajectory-latents",
        action="store_true",
        default=SamplingParam.return_trajectory_latents,
        help="Whether to return the trajectory",
    )
    parser.add_argument(
        "--return-trajectory-decoded",
        action="store_true",
        default=SamplingParam.return_trajectory_decoded,
        help="Whether to return the decoded trajectory",
    )
    return parser
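
Usage sketch: add_cli_args accepts any argparse-style parser (the parameter is typed Any), so the snippet below uses a plain ArgumentParser; whether FastVideo's own parser class is preferred in practice is left as an assumption.

import argparse
from fastvideo.configs.sample import SamplingParam

parser = argparse.ArgumentParser()
parser = SamplingParam.add_cli_args(parser)
args = parser.parse_args([
    "--prompt", "a corgi running on the beach",
    "--num-frames", "81",
    "--height", "480",
    "--width", "832",
    "--no-save-video",
])
print(args.num_frames, args.save_video)   # 81 False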

Modules

fastvideo.configs.sample.base
Classes
fastvideo.configs.sample.base.SamplingParam dataclass
SamplingParam(data_type: str = 'video', image_path: str | None = None, pil_image: Any | None = None, video_path: str | None = None, mouse_cond: Any | None = None, keyboard_cond: Any | None = None, grid_sizes: Any | None = None, pose: str | None = None, refine_from: str | None = None, t_thresh: float = 0.5, spatial_refine_only: bool = False, num_cond_frames: int = 0, stage1_video: Any | None = None, prompt: str | list[str] | None = None, negative_prompt: str = 'Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards', prompt_path: str | None = None, output_path: str = 'outputs/', output_video_name: str | None = None, num_videos_per_prompt: int = 1, seed: int = 1024, num_frames: int = 125, num_frames_round_down: bool = False, height: int = 720, width: int = 1280, fps: int = 24, num_inference_steps: int = 50, guidance_scale: float = 1.0, guidance_rescale: float = 0.0, boundary_ratio: float | None = None, sigmas: list[float] | None = None, enable_teacache: bool = False, save_video: bool = True, return_frames: bool = False, return_trajectory_latents: bool = False, return_trajectory_decoded: bool = False)

Sampling parameters for video generation.

Functions
fastvideo.configs.sample.base.SamplingParam.add_cli_args staticmethod
add_cli_args(parser: Any) -> Any

Add CLI arguments for SamplingParam fields

Source code in fastvideo/configs/sample/base.py
@staticmethod
def add_cli_args(parser: Any) -> Any:
    """Add CLI arguments for SamplingParam fields"""
    parser.add_argument(
        "--prompt",
        type=str,
        default=SamplingParam.prompt,
        help="Text prompt for video generation",
    )
    parser.add_argument(
        "--negative-prompt",
        type=str,
        default=SamplingParam.negative_prompt,
        help="Negative text prompt for video generation",
    )
    parser.add_argument(
        "--prompt-path",
        type=str,
        default=SamplingParam.prompt_path,
        help="Path to a text file containing the prompt",
    )
    parser.add_argument(
        "--output-path",
        type=str,
        default=SamplingParam.output_path,
        help="Path to save the generated video",
    )
    parser.add_argument(
        "--output-video-name",
        type=str,
        default=SamplingParam.output_video_name,
        help="Name of the output video",
    )
    parser.add_argument(
        "--num-videos-per-prompt",
        type=int,
        default=SamplingParam.num_videos_per_prompt,
        help="Number of videos to generate per prompt",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=SamplingParam.seed,
        help="Random seed for generation",
    )
    parser.add_argument(
        "--num-frames",
        type=int,
        default=SamplingParam.num_frames,
        help="Number of frames to generate",
    )
    parser.add_argument(
        "--height",
        type=int,
        default=SamplingParam.height,
        help="Height of generated video",
    )
    parser.add_argument(
        "--width",
        type=int,
        default=SamplingParam.width,
        help="Width of generated video",
    )
    parser.add_argument(
        "--fps",
        type=int,
        default=SamplingParam.fps,
        help="Frames per second for saved video",
    )
    parser.add_argument(
        "--num-inference-steps",
        type=int,
        default=SamplingParam.num_inference_steps,
        help="Number of denoising steps",
    )
    parser.add_argument(
        "--guidance-scale",
        type=float,
        default=SamplingParam.guidance_scale,
        help="Classifier-free guidance scale",
    )
    parser.add_argument(
        "--guidance-rescale",
        type=float,
        default=SamplingParam.guidance_rescale,
        help="Guidance rescale factor",
    )
    parser.add_argument(
        "--boundary-ratio",
        type=float,
        default=SamplingParam.boundary_ratio,
        help="Boundary timestep ratio",
    )
    parser.add_argument(
        "--save-video",
        action="store_true",
        default=SamplingParam.save_video,
        help="Whether to save the video to disk",
    )
    parser.add_argument(
        "--no-save-video",
        action="store_false",
        dest="save_video",
        help="Don't save the video to disk",
    )
    parser.add_argument(
        "--return-frames",
        action="store_true",
        default=SamplingParam.return_frames,
        help="Whether to return the raw frames",
    )
    parser.add_argument(
        "--image-path",
        type=str,
        default=SamplingParam.image_path,
        help="Path to input image for image-to-video generation",
    )
    parser.add_argument(
        "--video-path",
        type=str,
        default=SamplingParam.video_path,
        help="Path to input video for video-to-video generation",
    )
    parser.add_argument(
        "--refine-from",
        type=str,
        default=SamplingParam.refine_from,
        help="Path to stage1 video for refinement (LongCat 480p->720p)",
    )
    parser.add_argument(
        "--t-thresh",
        type=float,
        default=SamplingParam.t_thresh,
        help=
        "Threshold for timestep scheduling in refinement (default: 0.5)",
    )
    parser.add_argument(
        "--spatial-refine-only",
        action=StoreBoolean,
        default=SamplingParam.spatial_refine_only,
        help="Only perform spatial super-resolution (no temporal doubling)",
    )
    parser.add_argument(
        "--num-cond-frames",
        type=int,
        default=SamplingParam.num_cond_frames,
        help="Number of conditioning frames for refinement",
    )
    parser.add_argument(
        "--moba-config-path",
        type=str,
        default=None,
        help=
        "Path to a JSON file containing V-MoBA specific configurations.",
    )
    parser.add_argument(
        "--return-trajectory-latents",
        action="store_true",
        default=SamplingParam.return_trajectory_latents,
        help="Whether to return the trajectory",
    )
    parser.add_argument(
        "--return-trajectory-decoded",
        action="store_true",
        default=SamplingParam.return_trajectory_decoded,
        help="Whether to return the decoded trajectory",
    )
    return parser
Functions
fastvideo.configs.sample.ltx2
Classes
fastvideo.configs.sample.ltx2.LTX2SamplingParam dataclass
LTX2SamplingParam(data_type: str = 'video', image_path: str | None = None, pil_image: Any | None = None, video_path: str | None = None, mouse_cond: Any | None = None, keyboard_cond: Any | None = None, grid_sizes: Any | None = None, pose: str | None = None, refine_from: str | None = None, t_thresh: float = 0.5, spatial_refine_only: bool = False, num_cond_frames: int = 0, stage1_video: Any | None = None, prompt: str | list[str] | None = None, negative_prompt: str = '', prompt_path: str | None = None, output_path: str = 'outputs/', output_video_name: str | None = None, num_videos_per_prompt: int = 1, seed: int = 10, num_frames: int = 121, num_frames_round_down: bool = False, height: int = 1024, width: int = 1536, fps: int = 24, num_inference_steps: int = 8, guidance_scale: float = 1.0, guidance_rescale: float = 0.0, boundary_ratio: float | None = None, sigmas: list[float] | None = None, enable_teacache: bool = False, save_video: bool = True, return_frames: bool = False, return_trajectory_latents: bool = False, return_trajectory_decoded: bool = False)

Bases: SamplingParam

Default sampling parameters for LTX-2 distilled T2V.

fastvideo.configs.sample.turbodiffusion

TurboDiffusion sampling parameters.

TurboDiffusion uses the RCM (Recurrent Consistency Model) scheduler for 1-4 step video generation without classifier-free guidance.

Classes
fastvideo.configs.sample.turbodiffusion.TurboDiffusionI2V_A14B_SamplingParam dataclass
TurboDiffusionI2V_A14B_SamplingParam(data_type: str = 'video', image_path: str | None = None, pil_image: Any | None = None, video_path: str | None = None, mouse_cond: Any | None = None, keyboard_cond: Any | None = None, grid_sizes: Any | None = None, pose: str | None = None, refine_from: str | None = None, t_thresh: float = 0.5, spatial_refine_only: bool = False, num_cond_frames: int = 0, stage1_video: Any | None = None, prompt: str | list[str] | None = None, negative_prompt: str | None = None, prompt_path: str | None = None, output_path: str = 'outputs/', output_video_name: str | None = None, num_videos_per_prompt: int = 1, seed: int = 1024, num_frames: int = 81, num_frames_round_down: bool = False, height: int = 720, width: int = 1280, fps: int = 16, num_inference_steps: int = 4, guidance_scale: float = 1.0, guidance_rescale: float = 0.0, boundary_ratio: float | None = None, sigmas: list[float] | None = None, enable_teacache: bool = False, save_video: bool = True, return_frames: bool = False, return_trajectory_latents: bool = False, return_trajectory_decoded: bool = False)

Bases: SamplingParam

Sampling parameters for TurboDiffusion I2V A14B model.

Uses 4-step RCM sampling with dual-model switching (high/low noise).

fastvideo.configs.sample.turbodiffusion.TurboDiffusionT2V_14B_SamplingParam dataclass
TurboDiffusionT2V_14B_SamplingParam(data_type: str = 'video', image_path: str | None = None, pil_image: Any | None = None, video_path: str | None = None, mouse_cond: Any | None = None, keyboard_cond: Any | None = None, grid_sizes: Any | None = None, pose: str | None = None, refine_from: str | None = None, t_thresh: float = 0.5, spatial_refine_only: bool = False, num_cond_frames: int = 0, stage1_video: Any | None = None, prompt: str | list[str] | None = None, negative_prompt: str | None = None, prompt_path: str | None = None, output_path: str = 'outputs/', output_video_name: str | None = None, num_videos_per_prompt: int = 1, seed: int = 1024, num_frames: int = 81, num_frames_round_down: bool = False, height: int = 720, width: int = 1280, fps: int = 16, num_inference_steps: int = 4, guidance_scale: float = 1.0, guidance_rescale: float = 0.0, boundary_ratio: float | None = None, sigmas: list[float] | None = None, enable_teacache: bool = False, save_video: bool = True, return_frames: bool = False, return_trajectory_latents: bool = False, return_trajectory_decoded: bool = False)

Bases: SamplingParam

Sampling parameters for TurboDiffusion T2V 14B model.

Uses 4-step RCM sampling with guidance_scale=1.0 (no CFG).

fastvideo.configs.sample.turbodiffusion.TurboDiffusionT2V_1_3B_SamplingParam dataclass
TurboDiffusionT2V_1_3B_SamplingParam(data_type: str = 'video', image_path: str | None = None, pil_image: Any | None = None, video_path: str | None = None, mouse_cond: Any | None = None, keyboard_cond: Any | None = None, grid_sizes: Any | None = None, pose: str | None = None, refine_from: str | None = None, t_thresh: float = 0.5, spatial_refine_only: bool = False, num_cond_frames: int = 0, stage1_video: Any | None = None, prompt: str | list[str] | None = None, negative_prompt: str | None = None, prompt_path: str | None = None, output_path: str = 'outputs/', output_video_name: str | None = None, num_videos_per_prompt: int = 1, seed: int = 1024, num_frames: int = 81, num_frames_round_down: bool = False, height: int = 480, width: int = 832, fps: int = 16, num_inference_steps: int = 4, guidance_scale: float = 1.0, guidance_rescale: float = 0.0, boundary_ratio: float | None = None, sigmas: list[float] | None = None, enable_teacache: bool = False, save_video: bool = True, return_frames: bool = False, return_trajectory_latents: bool = False, return_trajectory_decoded: bool = False)

Bases: SamplingParam

Sampling parameters for TurboDiffusion T2V 1.3B model.

Uses 4-step RCM sampling with guidance_scale=1.0 (no CFG).
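
A short sketch (import path assumed from the module heading) showing that the TurboDiffusion presets default to 4-step, CFG-free sampling and that individual fields can be overridden like any dataclass:

from fastvideo.configs.sample.turbodiffusion import TurboDiffusionT2V_1_3B_SamplingParam

param = TurboDiffusionT2V_1_3B_SamplingParam(prompt="a timelapse of clouds over mountains")
print(param.num_inference_steps, param.guidance_scale)   # 4 1.0 (RCM, no CFG)

# RCM supports 1-4 steps per the module description; fewer steps trade quality for speed.
fast = TurboDiffusionT2V_1_3B_SamplingParam(prompt="a timelapse of clouds", num_inference_steps=2)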

fastvideo.configs.sample.wan
Classes
fastvideo.configs.sample.wan.Wan2_1_Fun_1_3B_InP_SamplingParam dataclass
Wan2_1_Fun_1_3B_InP_SamplingParam(data_type: str = 'video', image_path: str | None = None, pil_image: Any | None = None, video_path: str | None = None, mouse_cond: Any | None = None, keyboard_cond: Any | None = None, grid_sizes: Any | None = None, pose: str | None = None, refine_from: str | None = None, t_thresh: float = 0.5, spatial_refine_only: bool = False, num_cond_frames: int = 0, stage1_video: Any | None = None, prompt: str | list[str] | None = None, negative_prompt: str | None = '色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走', prompt_path: str | None = None, output_path: str = 'outputs/', output_video_name: str | None = None, num_videos_per_prompt: int = 1, seed: int = 1024, num_frames: int = 81, num_frames_round_down: bool = False, height: int = 480, width: int = 832, fps: int = 16, num_inference_steps: int = 50, guidance_scale: float = 6.0, guidance_rescale: float = 0.0, boundary_ratio: float | None = None, sigmas: list[float] | None = None, enable_teacache: bool = False, save_video: bool = True, return_frames: bool = False, return_trajectory_latents: bool = False, return_trajectory_decoded: bool = False)

Bases: SamplingParam

Sampling parameters for Wan2.1 Fun 1.3B InP model.

fastvideo.configs.sample.wan.Wan2_2_Base_SamplingParam dataclass
Wan2_2_Base_SamplingParam(data_type: str = 'video', image_path: str | None = None, pil_image: Any | None = None, video_path: str | None = None, mouse_cond: Any | None = None, keyboard_cond: Any | None = None, grid_sizes: Any | None = None, pose: str | None = None, refine_from: str | None = None, t_thresh: float = 0.5, spatial_refine_only: bool = False, num_cond_frames: int = 0, stage1_video: Any | None = None, prompt: str | list[str] | None = None, negative_prompt: str | None = '色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走', prompt_path: str | None = None, output_path: str = 'outputs/', output_video_name: str | None = None, num_videos_per_prompt: int = 1, seed: int = 1024, num_frames: int = 125, num_frames_round_down: bool = False, height: int = 720, width: int = 1280, fps: int = 24, num_inference_steps: int = 50, guidance_scale: float = 1.0, guidance_rescale: float = 0.0, boundary_ratio: float | None = None, sigmas: list[float] | None = None, enable_teacache: bool = False, save_video: bool = True, return_frames: bool = False, return_trajectory_latents: bool = False, return_trajectory_decoded: bool = False)

Bases: SamplingParam

Base sampling parameters for Wan2.2 models.

fastvideo.configs.sample.wan.Wan2_2_TI2V_5B_SamplingParam dataclass
Wan2_2_TI2V_5B_SamplingParam(data_type: str = 'video', image_path: str | None = None, pil_image: Any | None = None, video_path: str | None = None, mouse_cond: Any | None = None, keyboard_cond: Any | None = None, grid_sizes: Any | None = None, pose: str | None = None, refine_from: str | None = None, t_thresh: float = 0.5, spatial_refine_only: bool = False, num_cond_frames: int = 0, stage1_video: Any | None = None, prompt: str | list[str] | None = None, negative_prompt: str | None = '色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走', prompt_path: str | None = None, output_path: str = 'outputs/', output_video_name: str | None = None, num_videos_per_prompt: int = 1, seed: int = 1024, num_frames: int = 121, num_frames_round_down: bool = False, height: int = 704, width: int = 1280, fps: int = 24, num_inference_steps: int = 50, guidance_scale: float = 5.0, guidance_rescale: float = 0.0, boundary_ratio: float | None = None, sigmas: list[float] | None = None, enable_teacache: bool = False, save_video: bool = True, return_frames: bool = False, return_trajectory_latents: bool = False, return_trajectory_decoded: bool = False)

Bases: Wan2_2_Base_SamplingParam

Sampling parameters for Wan2.2 TI2V 5B model.

fastvideo.configs.utils

Functions

fastvideo.configs.utils.clean_cli_args
clean_cli_args(args: Namespace) -> dict[str, Any]

Clean the arguments by removing the ones that were not explicitly provided by the user.

Source code in fastvideo/configs/utils.py
def clean_cli_args(args: argparse.Namespace) -> dict[str, Any]:
    """
    Clean the arguments by removing the ones that were not explicitly provided by the user.
    """
    provided_args = {}
    for k, v in vars(args).items():
        if (v is not None and hasattr(args, '_provided')
                and k in args._provided):
            provided_args[k] = v

    return provided_args
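
A small sketch of the behaviour, assuming the parser attaches a _provided set of explicitly passed destinations to the Namespace (which is what the hasattr check above expects); here it is filled in by hand:

import argparse
from fastvideo.configs.utils import clean_cli_args

args = argparse.Namespace(model_path="path/to/model", seed=1024, fps=None)
args._provided = {"model_path"}          # pretend only --model-path was typed

print(clean_cli_args(args))              # {'model_path': 'path/to/model'}
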
fastvideo.configs.utils.update_config_from_args
update_config_from_args(config: Any, args_dict: dict[str, Any], prefix: str = '', pop_args: bool = False) -> bool

Update configuration object from arguments dictionary.

Parameters:

    config (Any, required): The configuration object to update.
    args_dict (dict[str, Any], required): Dictionary containing arguments.
    prefix (str, default ''): Prefix for the configuration parameters in the args_dict. If empty, direct attribute mapping without a prefix is assumed.
    pop_args (bool, default False): If True, remove the consumed keys from args_dict after they are applied (model_path is never removed).
Source code in fastvideo/configs/utils.py
def update_config_from_args(config: Any,
                            args_dict: dict[str, Any],
                            prefix: str = "",
                            pop_args: bool = False) -> bool:
    """
    Update configuration object from arguments dictionary.

    Args:
        config: The configuration object to update
        args_dict: Dictionary containing arguments
        prefix: Prefix for the configuration parameters in the args_dict.
               If empty, assumes direct attribute mapping without prefix.
    """
    # Handle top-level attributes (no prefix)
    args_not_to_remove = [
        'model_path',
    ]
    args_to_remove = []
    if prefix.strip() == "":
        for key, value in args_dict.items():
            if hasattr(config, key) and value is not None:
                if key == "text_encoder_precisions" and isinstance(value, list):
                    setattr(config, key, tuple(value))
                else:
                    setattr(config, key, value)
                if pop_args:
                    args_to_remove.append(key)
    else:
        # Handle nested attributes with prefix
        prefix_with_dot = f"{prefix}."
        for key, value in args_dict.items():
            if key.startswith(prefix_with_dot) and value is not None:
                attr_name = key[len(prefix_with_dot):]
                if hasattr(config, attr_name):
                    setattr(config, attr_name, value)
                if pop_args:
                    args_to_remove.append(key)

    if pop_args:
        for key in args_to_remove:
            if key not in args_not_to_remove:
                args_dict.pop(key)

    return len(args_to_remove) > 0
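
To make the prefix handling concrete, here is a sketch with a throwaway dataclass (not a real FastVideo config):

from dataclasses import dataclass
from fastvideo.configs.utils import update_config_from_args

@dataclass
class DummyConfig:          # stand-in config object, for illustration only
    model_path: str = ""
    num_frames: int = 81

cfg = DummyConfig()
args = {"preprocess.num_frames": 121, "model_path": "path/to/model"}

# With a prefix, only "preprocess.*" keys are applied (num_frames -> 121).
update_config_from_args(cfg, args, prefix="preprocess")
# Without a prefix, top-level keys matching attributes are applied (model_path).
update_config_from_args(cfg, args)
print(cfg)   # DummyConfig(model_path='path/to/model', num_frames=121)

Note that the boolean return value only counts keys queued for removal, so it is informative only when pop_args=True; with the default pop_args=False it is always False even when attributes were updated.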