fastvideo.v1.configs.pipelines.base#

Module Contents#

Classes#

PipelineConfig

Base configuration for all pipeline architectures.

STA_Mode

STA (Sliding Tile Attention) modes.

SlidingTileAttnConfig

Configuration for sliding tile attention.

Functions#

postprocess_text

preprocess_text

Data#

logger

API#

class fastvideo.v1.configs.pipelines.base.PipelineConfig[source]#

Base configuration for all pipeline architectures.
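Example: a minimal sketch (not taken from the library's docs) of constructing the config and overriding a few of the documented fields; it assumes PipelineConfig is a dataclass-style config that can be instantiated with its defaults.

from fastvideo.v1.configs.pipelines.base import PipelineConfig

# Assumption: the default constructor works because every field has a default.
config = PipelineConfig()
config.dit_precision = "bf16"
config.vae_precision = "fp16"
config.vae_tiling = True
config.embedded_cfg_scale = 6.0
config.check_pipeline_config()  # validate the resulting combination of settings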

DEFAULT_TEXT_ENCODER_PRECISIONS[source]#

(‘fp16’,)

STA_mode: fastvideo.v1.configs.pipelines.base.STA_Mode[source]#

None

static add_cli_args(parser: fastvideo.v1.utils.FlexibleArgumentParser, prefix: str = '') fastvideo.v1.utils.FlexibleArgumentParser[source]#
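Example: a sketch of registering this config's CLI arguments on a parser; it assumes FlexibleArgumentParser can be constructed and used like a standard argparse parser, and the empty argv list is illustrative only.

from fastvideo.v1.utils import FlexibleArgumentParser
from fastvideo.v1.configs.pipelines.base import PipelineConfig

parser = FlexibleArgumentParser()
# Register the config's arguments; a non-empty prefix namespaces them when
# several configs share one parser.
parser = PipelineConfig.add_cli_args(parser, prefix="")
args = parser.parse_args([])  # defaults only; real runs pass the registered flags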
check_pipeline_config() None[source]#
disable_autocast: bool[source]#

False

dit_config: fastvideo.v1.configs.models.DiTConfig[source]#

‘field(…)’

dit_precision: str[source]#

‘bf16’

dump_to_json(file_path: str)[source]#
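Example: a sketch of persisting a config to disk and reading it back; it assumes dump_to_json and load_from_json operate on the same JSON layout and that PipelineConfig can be constructed with its defaults. The file name is illustrative.

config = PipelineConfig()
config.dump_to_json("pipeline_config.json")   # illustrative path

restored = PipelineConfig()
restored.load_from_json("pipeline_config.json")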
embedded_cfg_scale: float[source]#

6.0

flow_shift: Optional[float][source]#

None

classmethod from_kwargs(kwargs: Dict[str, Any], config_cli_prefix: str = '') fastvideo.v1.configs.pipelines.base.PipelineConfig[source]#

Load a PipelineConfig from a kwargs dictionary.

kwargs: dictionary of keyword arguments
config_cli_prefix: prefix of the CLI arguments for this PipelineConfig instance
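Example: a sketch assuming the dictionary keys mirror the config's field names (the exact key convention, and the values shown, are assumptions, not library defaults).

overrides = {
    "dit_precision": "bf16",      # assumed to map onto the dit_precision field
    "vae_precision": "fp16",
    "flow_shift": 7.0,            # illustrative value
}
config = PipelineConfig.from_kwargs(overrides)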

classmethod from_pretrained(model_path: str) fastvideo.v1.configs.pipelines.base.PipelineConfig[source]#

Use the pipeline class setting from model_path to select the matching pipeline config.
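Example: a sketch; the path is hypothetical and may be a local directory or a hub identifier, depending on how model_path is resolved.

config = PipelineConfig.from_pretrained("path/to/pretrained/model")  # hypothetical path
# The returned object may be a pipeline-specific subclass, matching the
# pipeline class recorded at model_path.
print(type(config).__name__)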

image_encoder_config: fastvideo.v1.configs.models.EncoderConfig[source]#

‘field(…)’

image_encoder_precision: str[source]#

‘fp32’

load_from_json(file_path: str)[source]#
lora_nickname: Optional[str][source]#

‘default’

lora_path: Optional[str][source]#

None

lora_target_names: Optional[List[str]][source]#

None

mask_strategy_file_path: Optional[str][source]#

None

model_path: str = <Multiline-String>[source]#
neg_magic: Optional[str][source]#

None

pipeline_config_path: Optional[str][source]#

None

pos_magic: Optional[str][source]#

None

postprocess_text_funcs: Tuple[Callable[[fastvideo.v1.configs.models.encoders.BaseEncoderOutput], torch.tensor], ...][source]#

‘field(…)’

preprocess_text_funcs: Tuple[Callable[[str], str], ...][source]#

‘field(…)’

skip_time_steps: int[source]#

15

text_encoder_configs: Tuple[fastvideo.v1.configs.models.EncoderConfig, ...][source]#

‘field(…)’

text_encoder_precisions: Tuple[str, ...][source]#

‘field(…)’

timesteps_scale: Optional[bool][source]#

None

update_config_from_dict(args: Dict[str, Any], prefix: str = '') None[source]#
update_pipeline_config(source_pipeline_dict: Dict[str, Any]) None[source]#
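Example: a sketch of updating an existing config in place; it assumes the dictionary keys mirror field names (with an optional key prefix for update_config_from_dict), which is an assumption rather than documented behavior.

config = PipelineConfig()
# Assumed key convention: plain field names when no prefix is given.
config.update_config_from_dict({"vae_precision": "fp32", "vae_tiling": False})
config.update_pipeline_config({"flow_shift": 5.0, "embedded_cfg_scale": 6.0})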
vae_config: fastvideo.v1.configs.models.VAEConfig[source]#

‘field(…)’

vae_precision: str[source]#

‘fp16’

vae_sp: bool[source]#

True

vae_tiling: bool[source]#

True

class fastvideo.v1.configs.pipelines.base.STA_Mode[source]#

Bases: str, enum.Enum

STA (Sliding Tile Attention) modes.

Initialization

Initialize self. See help(type(self)) for accurate signature.

NONE[source]#

None

STA_INFERENCE[source]#

‘STA_inference’

STA_SEARCHING[source]#

‘STA_searching’

STA_TUNING[source]#

‘STA_tuning’

STA_TUNING_CFG[source]#

‘STA_tuning_cfg’
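Example: a sketch of selecting a sliding tile attention mode on a config; the mask strategy path is hypothetical.

from fastvideo.v1.configs.pipelines.base import PipelineConfig, STA_Mode

config = PipelineConfig()
config.STA_mode = STA_Mode.STA_INFERENCE
config.mask_strategy_file_path = "mask_strategy.json"  # hypothetical file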

class fastvideo.v1.configs.pipelines.base.SlidingTileAttnConfig[source]#

Bases: fastvideo.v1.configs.pipelines.base.PipelineConfig

Configuration for sliding tile attention.
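Example: a sketch; SlidingTileAttnConfig extends PipelineConfig, so the pipeline-level fields remain available alongside the tiling parameters shown here (the values are the documented defaults).

from fastvideo.v1.configs.pipelines.base import SlidingTileAttnConfig

sta_config = SlidingTileAttnConfig()
sta_config.height = 576
sta_config.width = 1024
sta_config.window_size = 16
sta_config.stride = 8
sta_config.pad_to_square = False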

height: int[source]#

576

pad_to_square: bool[source]#

False

stride: int[source]#

8

use_overlap_optimization: bool[source]#

True

width: int[source]#

1024

window_size: int[source]#

16

fastvideo.v1.configs.pipelines.base.logger[source]#

‘init_logger(…)’

fastvideo.v1.configs.pipelines.base.postprocess_text(output: fastvideo.v1.configs.models.encoders.BaseEncoderOutput) torch.tensor[source]#
fastvideo.v1.configs.pipelines.base.preprocess_text(prompt: str) str[source]#
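
Example: a sketch of plugging a custom text hook into a config; it assumes the callables in preprocess_text_funcs are applied per text encoder, with one entry per encoder, which is an assumption about the field's use. The hook below is hypothetical but matches preprocess_text's documented signature.

from fastvideo.v1.configs.pipelines.base import PipelineConfig

def strip_prompt(prompt: str) -> str:
    # illustrative preprocessing hook with the same str -> str signature
    return prompt.strip()

config = PipelineConfig()
config.preprocess_text_funcs = (strip_prompt,)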