fastvideo.v1.configs.pipelines.base#

Module Contents#

Classes#

PipelineConfig

Base configuration for all pipeline architectures.

SlidingTileAttnConfig

Configuration for sliding tile attention.

Functions#

postprocess_text

preprocess_text

Data#

logger

API#

class fastvideo.v1.configs.pipelines.base.PipelineConfig[source]#

Base configuration for all pipeline architectures.

disable_autocast: bool[source]#

False

dit_config: fastvideo.v1.configs.models.DiTConfig[source]#

‘field(…)’

dump_to_json(file_path: str)[source]#

embedded_cfg_scale: float[source]#

6.0

enable_torch_compile: bool[source]#

False

flow_shift: Optional[float][source]#

None

classmethod from_pretrained(model_path: str) → fastvideo.v1.configs.pipelines.base.PipelineConfig[source]#

load_from_json(file_path: str)[source]#

mask_strategy_file_path: Optional[str][source]#

None

postprocess_text_funcs: Tuple[Callable[[fastvideo.v1.configs.models.encoders.BaseEncoderOutput], torch.tensor], ...][source]#

‘field(…)’

precision: str[source]#

‘bf16’

preprocess_text_funcs: Tuple[Callable[[str], str], ...][source]#

‘field(…)’

text_encoder_configs: Tuple[fastvideo.v1.configs.models.EncoderConfig, ...][source]#

‘field(…)’

text_encoder_precisions: Tuple[str, ...][source]#

‘field(…)’

update_pipeline_config(source_pipeline_dict: Dict[str, Any]) → None[source]#

use_cpu_offload: bool[source]#

False

vae_config: fastvideo.v1.configs.models.VAEConfig[source]#

‘field(…)’

vae_precision: str[source]#

‘fp16’

vae_sp: bool[source]#

True

vae_tiling: bool[source]#

True
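
A minimal usage sketch based only on the members documented above; the checkpoint path and the override values are placeholders, and the exact loading behavior is determined by the implementation.

```python
from fastvideo.v1.configs.pipelines.base import PipelineConfig

# Build the config associated with a pretrained pipeline.
# "path/to/model" is a placeholder, not a real checkpoint.
config = PipelineConfig.from_pretrained("path/to/model")

# Override selected fields from a plain dictionary.
config.update_pipeline_config({"precision": "bf16", "vae_precision": "fp16"})

# Persist the resolved configuration and reload it later.
config.dump_to_json("pipeline_config.json")
config.load_from_json("pipeline_config.json")
```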

class fastvideo.v1.configs.pipelines.base.SlidingTileAttnConfig[source]#

Bases: fastvideo.v1.configs.pipelines.base.PipelineConfig

Configuration for sliding tile attention.

height: int[source]#

576

pad_to_square: bool[source]#

False

stride: int[source]#

8

use_overlap_optimization: bool#

True

width: int[source]#

1024

window_size: int[source]#

16
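
A sketch of constructing the sliding tile attention config, assuming the class behaves as a standard dataclass whose keyword arguments are the fields listed above; the values shown simply repeat the documented defaults.

```python
from fastvideo.v1.configs.pipelines.base import SlidingTileAttnConfig

# SlidingTileAttnConfig inherits every PipelineConfig field and adds the
# tile geometry below. Values shown are the documented defaults.
sta_config = SlidingTileAttnConfig(
    height=576,
    width=1024,
    window_size=16,
    stride=8,
    pad_to_square=False,
)

# Base-class fields remain available, e.g. precision and vae_tiling.
print(sta_config.precision, sta_config.vae_tiling)
```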

fastvideo.v1.configs.pipelines.base.logger[source]#

‘init_logger(…)’

fastvideo.v1.configs.pipelines.base.postprocess_text(output: fastvideo.v1.configs.models.encoders.BaseEncoderOutput) → torch.tensor[source]#

fastvideo.v1.configs.pipelines.base.preprocess_text(prompt: str) → str[source]#
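
A sketch of how these module-level text helpers relate to the per-encoder tuples on PipelineConfig; the prompt string and model path are placeholders, and a real pipeline may install different callables per text encoder.

```python
from fastvideo.v1.configs.pipelines.base import PipelineConfig, preprocess_text

# preprocess_text maps a raw prompt to the string fed to a text encoder;
# postprocess_text maps an encoder's BaseEncoderOutput to a tensor.
prompt = preprocess_text("a cat playing piano")  # placeholder prompt

# A config carries one preprocessing callable per text encoder.
config = PipelineConfig.from_pretrained("path/to/model")  # placeholder path
cleaned_prompts = tuple(fn(prompt) for fn in config.preprocess_text_funcs)
```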