sample

Classes

fastvideo.configs.sample.SamplingParam dataclass

SamplingParam(data_type: str = 'video', image_path: str | None = None, video_path: str | None = None, prompt: str | list[str] | None = None, negative_prompt: str = 'Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards', prompt_path: str | None = None, output_path: str = 'outputs/', output_video_name: str | None = None, num_videos_per_prompt: int = 1, seed: int = 1024, num_frames: int = 125, num_frames_round_down: bool = False, height: int = 720, width: int = 1280, fps: int = 24, num_inference_steps: int = 50, guidance_scale: float = 1.0, guidance_rescale: float = 0.0, boundary_ratio: float | None = None, enable_teacache: bool = False, save_video: bool = True, return_frames: bool = False, return_trajectory_latents: bool = False, return_trajectory_decoded: bool = False)

Sampling parameters for video generation.
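Example: constructing a SamplingParam directly and overriding a few fields (a minimal sketch; every field name and default comes from the signature above, and the prompt text is illustrative):

from fastvideo.configs.sample import SamplingParam

# Every field has a default, so only the values to override need to be passed.
param = SamplingParam(
    prompt="A red fox running through a snowy forest",
    num_frames=81,
    height=480,
    width=832,
    num_inference_steps=30,
    seed=42,
)
print(param.output_path)  # 'outputs/' (default)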

Functions

fastvideo.configs.sample.SamplingParam.add_cli_args staticmethod
add_cli_args(parser: Any) -> Any

Add CLI arguments for SamplingParam fields

Source code in fastvideo/configs/sample/base.py
@staticmethod
def add_cli_args(parser: Any) -> Any:
    """Add CLI arguments for SamplingParam fields"""
    parser.add_argument(
        "--prompt",
        type=str,
        default=SamplingParam.prompt,
        help="Text prompt for video generation",
    )
    parser.add_argument(
        "--negative-prompt",
        type=str,
        default=SamplingParam.negative_prompt,
        help="Negative text prompt for video generation",
    )
    parser.add_argument(
        "--prompt-path",
        type=str,
        default=SamplingParam.prompt_path,
        help="Path to a text file containing the prompt",
    )
    parser.add_argument(
        "--output-path",
        type=str,
        default=SamplingParam.output_path,
        help="Path to save the generated video",
    )
    parser.add_argument(
        "--output-video-name",
        type=str,
        default=SamplingParam.output_video_name,
        help="Name of the output video",
    )
    parser.add_argument(
        "--num-videos-per-prompt",
        type=int,
        default=SamplingParam.num_videos_per_prompt,
        help="Number of videos to generate per prompt",
    )
    parser.add_argument(
        "--seed",
        type=int,
        default=SamplingParam.seed,
        help="Random seed for generation",
    )
    parser.add_argument(
        "--num-frames",
        type=int,
        default=SamplingParam.num_frames,
        help="Number of frames to generate",
    )
    parser.add_argument(
        "--height",
        type=int,
        default=SamplingParam.height,
        help="Height of generated video",
    )
    parser.add_argument(
        "--width",
        type=int,
        default=SamplingParam.width,
        help="Width of generated video",
    )
    parser.add_argument(
        "--fps",
        type=int,
        default=SamplingParam.fps,
        help="Frames per second for saved video",
    )
    parser.add_argument(
        "--num-inference-steps",
        type=int,
        default=SamplingParam.num_inference_steps,
        help="Number of denoising steps",
    )
    parser.add_argument(
        "--guidance-scale",
        type=float,
        default=SamplingParam.guidance_scale,
        help="Classifier-free guidance scale",
    )
    parser.add_argument(
        "--guidance-rescale",
        type=float,
        default=SamplingParam.guidance_rescale,
        help="Guidance rescale factor",
    )
    parser.add_argument(
        "--boundary-ratio",
        type=float,
        default=SamplingParam.boundary_ratio,
        help="Boundary timestep ratio",
    )
    parser.add_argument(
        "--save-video",
        action="store_true",
        default=SamplingParam.save_video,
        help="Whether to save the video to disk",
    )
    parser.add_argument(
        "--no-save-video",
        action="store_false",
        dest="save_video",
        help="Don't save the video to disk",
    )
    parser.add_argument(
        "--return-frames",
        action="store_true",
        default=SamplingParam.return_frames,
        help="Whether to return the raw frames",
    )
    parser.add_argument(
        "--image-path",
        type=str,
        default=SamplingParam.image_path,
        help="Path to input image for image-to-video generation",
    )
    parser.add_argument(
        "--video_path",
        type=str,
        default=SamplingParam.video_path,
        help="Path to input video for video-to-video generation",
    )
    parser.add_argument(
        "--moba-config-path",
        type=str,
        default=None,
        help="Path to a JSON file containing V-MoBA specific configurations.",
    )
    parser.add_argument(
        "--return-trajectory-latents",
        action="store_true",
        default=SamplingParam.return_trajectory_latents,
        help="Whether to return the trajectory",
    )
    parser.add_argument(
        "--return-trajectory-decoded",
        action="store_true",
        default=SamplingParam.return_trajectory_decoded,
        help="Whether to return the decoded trajectory",
    )
    return parser
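
Example usage: attaching these arguments to a standard argparse parser (a minimal sketch; only the documented add_cli_args static method is used, and the example command line is illustrative):

import argparse

from fastvideo.configs.sample import SamplingParam

parser = argparse.ArgumentParser(description="Generate a video")
parser = SamplingParam.add_cli_args(parser)

# e.g. python generate.py --prompt "A sailboat at sunset" --num-frames 81 --no-save-video
args = parser.parse_args()
print(args.prompt, args.num_frames, args.save_video)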

Modules

fastvideo.configs.sample.base

Classes

fastvideo.configs.sample.base.SamplingParam dataclass
Sampling parameters for video generation.

This is the same class that the package re-exports as fastvideo.configs.sample.SamplingParam; see the entry above for the full signature and the add_cli_args source.

Functions

fastvideo.configs.sample.registry

Classes

Functions

fastvideo.configs.sample.registry.get_sampling_param_cls_for_name
get_sampling_param_cls_for_name(pipeline_name_or_path: str) -> Any | None

Get the appropriate sampling param for specific pretrained weights.

Source code in fastvideo/configs/sample/registry.py
def get_sampling_param_cls_for_name(pipeline_name_or_path: str) -> Any | None:
    """Get the appropriate sampling param for specific pretrained weights."""

    if os.path.exists(pipeline_name_or_path):
        config = verify_model_config_and_directory(pipeline_name_or_path)
        logger.warning(
            "FastVideo may not correctly identify the optimal sampling param for this model, as the local directory may have been renamed."
        )
    else:
        config = maybe_download_model_index(pipeline_name_or_path)

    pipeline_name = config["_class_name"]

    # First try exact match for specific weights
    if pipeline_name_or_path in SAMPLING_PARAM_REGISTRY:
        return SAMPLING_PARAM_REGISTRY[pipeline_name_or_path]

    # Try partial matches (for local paths that might include the weight ID)
    for registered_id, config_class in SAMPLING_PARAM_REGISTRY.items():
        if registered_id in pipeline_name_or_path:
            return config_class

    # If no match, try to use the fallback config
    fallback_config = None
    # Try to determine pipeline architecture for fallback
    for pipeline_type, detector in SAMPLING_PARAM_DETECTOR.items():
        if detector(pipeline_name.lower()):
            fallback_config = SAMPLING_FALLBACK_PARAM.get(pipeline_type)
            break

    logger.warning(
        "No match found for pipeline %s, using fallback sampling param %s.",
        pipeline_name_or_path, fallback_config)
    return fallback_config
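
Example: resolving the sampling-param class for a set of pretrained weights and instantiating it (a minimal sketch; the weight ID is illustrative, resolution may fetch the model index over the network, and the None fallback mirrors the source above):

from fastvideo.configs.sample import SamplingParam
from fastvideo.configs.sample.registry import get_sampling_param_cls_for_name

# Illustrative weight ID; a local pipeline directory also works.
param_cls = get_sampling_param_cls_for_name("Wan-AI/Wan2.1-T2V-1.3B-Diffusers")

# The function can return None when nothing is registered and no fallback matches.
param_cls = param_cls or SamplingParam
param = param_cls(prompt="A timelapse of clouds over mountains")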

fastvideo.configs.sample.wan

Classes

fastvideo.configs.sample.wan.Wan2_1_Fun_1_3B_InP_SamplingParam dataclass
Wan2_1_Fun_1_3B_InP_SamplingParam(data_type: str = 'video', image_path: str | None = None, video_path: str | None = None, prompt: str | list[str] | None = None, negative_prompt: str | None = '色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走', prompt_path: str | None = None, output_path: str = 'outputs/', output_video_name: str | None = None, num_videos_per_prompt: int = 1, seed: int = 1024, num_frames: int = 81, num_frames_round_down: bool = False, height: int = 480, width: int = 832, fps: int = 16, num_inference_steps: int = 50, guidance_scale: float = 6.0, guidance_rescale: float = 0.0, boundary_ratio: float | None = None, enable_teacache: bool = False, save_video: bool = True, return_frames: bool = False, return_trajectory_latents: bool = False, return_trajectory_decoded: bool = False)

Bases: SamplingParam

Sampling parameters for Wan2.1 Fun 1.3B InP model.

fastvideo.configs.sample.wan.Wan2_2_Base_SamplingParam dataclass
Wan2_2_Base_SamplingParam(data_type: str = 'video', image_path: str | None = None, video_path: str | None = None, prompt: str | list[str] | None = None, negative_prompt: str | None = '色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走', prompt_path: str | None = None, output_path: str = 'outputs/', output_video_name: str | None = None, num_videos_per_prompt: int = 1, seed: int = 1024, num_frames: int = 125, num_frames_round_down: bool = False, height: int = 720, width: int = 1280, fps: int = 24, num_inference_steps: int = 50, guidance_scale: float = 1.0, guidance_rescale: float = 0.0, boundary_ratio: float | None = None, enable_teacache: bool = False, save_video: bool = True, return_frames: bool = False, return_trajectory_latents: bool = False, return_trajectory_decoded: bool = False)

Bases: SamplingParam

Base sampling parameters shared by the Wan2.2 models.

fastvideo.configs.sample.wan.Wan2_2_TI2V_5B_SamplingParam dataclass
Wan2_2_TI2V_5B_SamplingParam(data_type: str = 'video', image_path: str | None = None, video_path: str | None = None, prompt: str | list[str] | None = None, negative_prompt: str | None = '色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走', prompt_path: str | None = None, output_path: str = 'outputs/', output_video_name: str | None = None, num_videos_per_prompt: int = 1, seed: int = 1024, num_frames: int = 121, num_frames_round_down: bool = False, height: int = 704, width: int = 1280, fps: int = 24, num_inference_steps: int = 50, guidance_scale: float = 5.0, guidance_rescale: float = 0.0, boundary_ratio: float | None = None, enable_teacache: bool = False, save_video: bool = True, return_frames: bool = False, return_trajectory_latents: bool = False, return_trajectory_decoded: bool = False)

Bases: Wan2_2_Base_SamplingParam

Sampling parameters for Wan2.2 TI2V 5B model.
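
Example: the Wan-specific dataclasses are used exactly like the base SamplingParam; only their defaults differ (a minimal sketch based on the signatures above, with an illustrative prompt):

from fastvideo.configs.sample.wan import Wan2_2_TI2V_5B_SamplingParam

param = Wan2_2_TI2V_5B_SamplingParam(prompt="A drone shot over a coastal city")
print(param.num_frames, param.height, param.width, param.guidance_scale)
# 121 704 1280 5.0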