entrypoints

Modules

fastvideo.entrypoints.cli

Modules

fastvideo.entrypoints.cli.cli_types
Classes
fastvideo.entrypoints.cli.cli_types.CLISubcommand

Base class for CLI subcommands

Functions
fastvideo.entrypoints.cli.cli_types.CLISubcommand.cmd
cmd(args: Namespace) -> None

Execute the command with the given arguments

Source code in fastvideo/entrypoints/cli/cli_types.py
def cmd(self, args: argparse.Namespace) -> None:
    """Execute the command with the given arguments"""
    raise NotImplementedError
fastvideo.entrypoints.cli.cli_types.CLISubcommand.subparser_init
subparser_init(subparsers: _SubParsersAction) -> FlexibleArgumentParser

Initialize the subparser for this command

Source code in fastvideo/entrypoints/cli/cli_types.py
def subparser_init(
        self,
        subparsers: argparse._SubParsersAction) -> FlexibleArgumentParser:
    """Initialize the subparser for this command"""
    raise NotImplementedError
fastvideo.entrypoints.cli.cli_types.CLISubcommand.validate
validate(args: Namespace) -> None

Validate the arguments for this command

Source code in fastvideo/entrypoints/cli/cli_types.py
def validate(self, args: argparse.Namespace) -> None:
    """Validate the arguments for this command"""
    pass
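
For orientation, here is a minimal sketch of a hypothetical subcommand built on this interface. The "hello" command and its flag are invented for illustration, and the import path for FlexibleArgumentParser is an assumption:

import argparse

from fastvideo.entrypoints.cli.cli_types import CLISubcommand
# Assumption: FlexibleArgumentParser's import path; adjust to wherever the
# project actually defines it.
from fastvideo.utils import FlexibleArgumentParser


class HelloSubcommand(CLISubcommand):
    """Hypothetical subcommand showing the three hooks subclasses override."""

    def __init__(self) -> None:
        self.name = "hello"
        super().__init__()

    def subparser_init(
            self,
            subparsers: argparse._SubParsersAction) -> FlexibleArgumentParser:
        parser = subparsers.add_parser("hello", help="Print a greeting")
        parser.add_argument("--target", default="world")
        return parser

    def validate(self, args: argparse.Namespace) -> None:
        if not args.target:
            raise ValueError("target must be non-empty")

    def cmd(self, args: argparse.Namespace) -> None:
        print(f"Hello, {args.target}!")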
fastvideo.entrypoints.cli.generate
Classes
fastvideo.entrypoints.cli.generate.GenerateSubcommand
GenerateSubcommand()

Bases: CLISubcommand

The generate subcommand for the FastVideo CLI

Source code in fastvideo/entrypoints/cli/generate.py
def __init__(self) -> None:
    self.name = "generate"
    super().__init__()
    self.init_arg_names = self._get_init_arg_names()
    self.generation_arg_names = self._get_generation_arg_names()
Functions
fastvideo.entrypoints.cli.generate.GenerateSubcommand.validate
validate(args: Namespace) -> None

Validate the arguments for this command

Source code in fastvideo/entrypoints/cli/generate.py
def validate(self, args: argparse.Namespace) -> None:
    """Validate the arguments for this command"""
    if args.num_gpus is not None and args.num_gpus <= 0:
        raise ValueError("Number of gpus must be positive")

    if args.config and not os.path.exists(args.config):
        raise ValueError(f"Config file not found: {args.config}")
fastvideo.entrypoints.cli.main
Functions
fastvideo.entrypoints.cli.main.cmd_init
cmd_init() -> list[CLISubcommand]

Initialize all commands from separate modules

Source code in fastvideo/entrypoints/cli/main.py
def cmd_init() -> list[CLISubcommand]:
    """Initialize all commands from separate modules"""
    commands = []
    commands.extend(generate_cmd_init())
    return commands
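
A sketch of the dispatch pattern this list enables; the parser wiring below is illustrative, not the actual body of main():

import argparse

from fastvideo.entrypoints.cli.main import cmd_init

parser = argparse.ArgumentParser(prog="fastvideo")
subparsers = parser.add_subparsers(dest="subcommand", required=True)

cmds = {}
for cmd in cmd_init():
    cmd.subparser_init(subparsers)  # registers e.g. the "generate" parser
    cmds[cmd.name] = cmd

args = parser.parse_args()
cmds[args.subcommand].validate(args)
cmds[args.subcommand].cmd(args)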
fastvideo.entrypoints.cli.utils
Functions
fastvideo.entrypoints.cli.utils.launch_distributed
launch_distributed(num_gpus: int, args: list[str], master_port: int | None = None) -> int

Launch a distributed job with the given arguments

Parameters:

    num_gpus (int): Number of GPUs to use. Required.
    args (list[str]): Arguments to pass to v1_fastvideo_inference.py (defaults to sys.argv[1:]). Required.
    master_port (int | None): Port for the master process (default: random). Defaults to None.
Source code in fastvideo/entrypoints/cli/utils.py
def launch_distributed(num_gpus: int,
                       args: list[str],
                       master_port: int | None = None) -> int:
    """
    Launch a distributed job with the given arguments

    Args:
        num_gpus: Number of GPUs to use
        args: Arguments to pass to v1_fastvideo_inference.py (defaults to sys.argv[1:])
        master_port: Port for the master process (default: random)
    """

    current_env = os.environ.copy()
    python_executable = sys.executable
    project_root = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "../../../.."))
    main_script = os.path.join(project_root,
                               "fastvideo/sample/v1_fastvideo_inference.py")

    cmd = [
        python_executable, "-m", "torch.distributed.run",
        f"--nproc_per_node={num_gpus}"
    ]

    if master_port is not None:
        cmd.append(f"--master_port={master_port}")

    cmd.append(main_script)
    cmd.extend(args)

    logger.info("Running inference with %d GPU(s)", num_gpus)
    logger.info("Launching command: %s", " ".join(cmd))

    current_env["PYTHONIOENCODING"] = "utf-8"
    process = subprocess.Popen(cmd,
                               env=current_env,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT,
                               universal_newlines=True,
                               bufsize=1,
                               encoding='utf-8',
                               errors='replace')

    if process.stdout:
        for line in iter(process.stdout.readline, ''):
            print(line.strip())

    return process.wait()
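
A hypothetical invocation; the forwarded arguments are whatever flags the inference script expects:

import sys

from fastvideo.entrypoints.cli.utils import launch_distributed

# Forward this process's own CLI arguments to the torchrun-launched script,
# mirroring the "defaults to sys.argv[1:]" convention noted in the docstring.
exit_code = launch_distributed(num_gpus=2, args=sys.argv[1:], master_port=29500)
sys.exit(exit_code)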

fastvideo.entrypoints.video_generator

VideoGenerator module for FastVideo.

This module provides a consolidated interface for generating videos using diffusion models.

Classes

fastvideo.entrypoints.video_generator.VideoGenerator
VideoGenerator(fastvideo_args: FastVideoArgs, executor_class: type[Executor], log_stats: bool)

A unified class for generating videos using diffusion models.

This class provides a simple interface for video generation with rich customization options, similar to popular frameworks like HF Diffusers.

Initialize the video generator.

Parameters:

    fastvideo_args (FastVideoArgs): The inference arguments. Required.
    executor_class (type[Executor]): The executor class to use for inference. Required.
    log_stats (bool): Statistics-logging flag; accepted but not yet used (see the TODO in from_fastvideo_args). Required.
Source code in fastvideo/entrypoints/video_generator.py
def __init__(self, fastvideo_args: FastVideoArgs,
             executor_class: type[Executor], log_stats: bool):
    """
    Initialize the video generator.

    Args:
        fastvideo_args: The inference arguments
        executor_class: The executor class to use for inference
    """
    self.fastvideo_args = fastvideo_args
    self.executor = executor_class(fastvideo_args)
Functions
fastvideo.entrypoints.video_generator.VideoGenerator.from_fastvideo_args classmethod
from_fastvideo_args(fastvideo_args: FastVideoArgs) -> VideoGenerator

Create a video generator with the specified arguments.

Parameters:

    fastvideo_args (FastVideoArgs): The inference arguments. Required.

Returns:

    VideoGenerator: The created video generator.

Source code in fastvideo/entrypoints/video_generator.py
@classmethod
def from_fastvideo_args(cls,
                        fastvideo_args: FastVideoArgs) -> "VideoGenerator":
    """
    Create a video generator with the specified arguments.

    Args:
        fastvideo_args: The inference arguments

    Returns:
        The created video generator
    """
    # Initialize distributed environment if needed
    # initialize_distributed_and_parallelism(fastvideo_args)

    executor_class = Executor.get_class(fastvideo_args)
    return cls(
        fastvideo_args=fastvideo_args,
        executor_class=executor_class,
        log_stats=False,  # TODO: implement
    )
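
A sketch of driving this factory directly; FastVideoArgs.from_kwargs is the same construction path from_pretrained uses below, and the import path for FastVideoArgs plus the model path are assumptions:

from fastvideo.entrypoints.video_generator import VideoGenerator
# Assumption: FastVideoArgs' import path; adjust to the project's layout.
from fastvideo.fastvideo_args import FastVideoArgs

args = FastVideoArgs.from_kwargs(model_path="path/to/model")  # placeholder path
generator = VideoGenerator.from_fastvideo_args(args)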
fastvideo.entrypoints.video_generator.VideoGenerator.from_pretrained classmethod
from_pretrained(model_path: str, device: str | None = None, torch_dtype: dtype | None = None, **kwargs) -> VideoGenerator

Create a video generator from a pretrained model.

Parameters:

    model_path (str): Path or identifier for the pretrained model. Required.
    device (str | None): Device to load the model on (e.g., "cuda", "cuda:0", "cpu"). Defaults to None.
    torch_dtype (dtype | None): Data type for model weights (e.g., torch.float16). Defaults to None.
    pipeline_config: Pipeline config to use for inference (supplied through **kwargs).
    **kwargs: Additional arguments to customize model loading; set any FastVideoArgs or PipelineConfig attribute here.

Returns:

    VideoGenerator: The created video generator.

Priority level: default pipeline config < user's pipeline config < user's kwargs.

Source code in fastvideo/entrypoints/video_generator.py
@classmethod
def from_pretrained(cls,
                    model_path: str,
                    device: str | None = None,
                    torch_dtype: torch.dtype | None = None,
                    **kwargs) -> "VideoGenerator":
    """
    Create a video generator from a pretrained model.

    Args:
        model_path: Path or identifier for the pretrained model
        device: Device to load the model on (e.g., "cuda", "cuda:0", "cpu")
        torch_dtype: Data type for model weights (e.g., torch.float16)
        pipeline_config: Pipeline config to use for inference
        **kwargs: Additional arguments to customize model loading, set any FastVideoArgs or PipelineConfig attributes here.

    Returns:
        The created video generator

    Priority level: Default pipeline config < User's pipeline config < User's kwargs
    """
    # If users also provide some kwargs, it will override the FastVideoArgs and PipelineConfig.
    kwargs['model_path'] = model_path
    fastvideo_args = FastVideoArgs.from_kwargs(**kwargs)

    return cls.from_fastvideo_args(fastvideo_args)
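
A usage sketch; the model path is a placeholder, and any keyword override is assumed to name a real FastVideoArgs or PipelineConfig attribute:

from fastvideo.entrypoints.video_generator import VideoGenerator

generator = VideoGenerator.from_pretrained(
    "path/to/model",  # placeholder, not a real checkpoint
    num_gpus=1,       # assumption: a FastVideoArgs attribute set via kwargs
)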
fastvideo.entrypoints.video_generator.VideoGenerator.generate_video
generate_video(prompt: str | None = None, sampling_param: SamplingParam | None = None, **kwargs) -> dict[str, Any] | list[ndarray] | list[dict[str, Any]]

Generate a video based on the given prompt.

Parameters:

    prompt (str | None): The prompt to use for generation (optional if prompt_txt is provided). Defaults to None.
    sampling_param (SamplingParam | None): Sampling parameters; loaded from the pretrained model when None. Defaults to None.

The following options are supplied through **kwargs and override the corresponding sampling or fastvideo_args settings:

    negative_prompt: The negative prompt to use (overrides the one in fastvideo_args).
    output_path: Path to save the video (overrides the one in fastvideo_args).
    prompt_path: Path to a prompt file.
    save_video: Whether to save the video to disk.
    return_frames: Whether to return the raw frames.
    num_inference_steps: Number of denoising steps (overrides fastvideo_args).
    guidance_scale: Classifier-free guidance scale (overrides fastvideo_args).
    num_frames: Number of frames to generate (overrides fastvideo_args).
    height: Height of the generated video (overrides fastvideo_args).
    width: Width of the generated video (overrides fastvideo_args).
    fps: Frames per second for the saved video (overrides fastvideo_args).
    seed: Random seed for generation (overrides fastvideo_args).
    callback: Callback function called after each step.
    callback_steps: Number of steps between callbacks.

Returns:

    dict[str, Any] | list[ndarray] | list[dict[str, Any]]: Either the output dictionary, a list of frames, or a list of per-prompt results for batch processing.

Source code in fastvideo/entrypoints/video_generator.py
def generate_video(
    self,
    prompt: str | None = None,
    sampling_param: SamplingParam | None = None,
    **kwargs,
) -> dict[str, Any] | list[np.ndarray] | list[dict[str, Any]]:
    """
    Generate a video based on the given prompt.

    Args:
        prompt: The prompt to use for generation (optional if prompt_txt is provided)
        negative_prompt: The negative prompt to use (overrides the one in fastvideo_args)
        output_path: Path to save the video (overrides the one in fastvideo_args)
        prompt_path: Path to prompt file
        save_video: Whether to save the video to disk
        return_frames: Whether to return the raw frames
        num_inference_steps: Number of denoising steps (overrides fastvideo_args)
        guidance_scale: Classifier-free guidance scale (overrides fastvideo_args)
        num_frames: Number of frames to generate (overrides fastvideo_args)
        height: Height of generated video (overrides fastvideo_args)
        width: Width of generated video (overrides fastvideo_args)
        fps: Frames per second for saved video (overrides fastvideo_args)
        seed: Random seed for generation (overrides fastvideo_args)
        callback: Callback function called after each step
        callback_steps: Number of steps between each callback

    Returns:
        Either the output dictionary, list of frames, or list of results for batch processing
    """
    # Handle batch processing from text file
    if sampling_param is None:
        sampling_param = SamplingParam.from_pretrained(
            self.fastvideo_args.model_path)
    sampling_param.update(kwargs)

    if self.fastvideo_args.prompt_txt is not None or sampling_param.prompt_path is not None:
        prompt_txt_path = sampling_param.prompt_path or self.fastvideo_args.prompt_txt
        if not os.path.exists(prompt_txt_path):
            raise FileNotFoundError(
                f"Prompt text file not found: {prompt_txt_path}")

        # Read prompts from file
        with open(prompt_txt_path, encoding='utf-8') as f:
            prompts = [line.strip() for line in f if line.strip()]

        if not prompts:
            raise ValueError(f"No prompts found in file: {prompt_txt_path}")

        logger.info("Found %d prompts in %s", len(prompts), prompt_txt_path)

        results = []
        for i, batch_prompt in enumerate(prompts):
            logger.info("Processing prompt %d/%d: %s...", i + 1,
                        len(prompts), batch_prompt[:100])
            try:
                # Generate video for this prompt using the same logic below
                output_path = self._prepare_output_path(
                    sampling_param.output_path, batch_prompt)
                kwargs["output_path"] = output_path
                result = self._generate_single_video(
                    prompt=batch_prompt,
                    sampling_param=sampling_param,
                    **kwargs)

                # Add prompt info to result
                if isinstance(result, dict):
                    result["prompt_index"] = i
                    result["prompt"] = batch_prompt

                results.append(result)
                logger.info("Successfully generated video for prompt %d",
                            i + 1)

            except Exception as e:
                logger.error("Failed to generate video for prompt %d: %s",
                             i + 1, e)
                continue

        logger.info(
            "Completed batch processing. Generated %d videos successfully.",
            len(results))
        return results

    # Single prompt generation (original behavior)
    if prompt is None:
        raise ValueError("Either prompt or prompt_txt must be provided")
    output_path = self._prepare_output_path(sampling_param.output_path,
                                            prompt)
    kwargs["output_path"] = output_path
    return self._generate_single_video(prompt=prompt,
                                       sampling_param=sampling_param,
                                       **kwargs)
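
Putting it together, a sketch of single-prompt and batch use; paths are placeholders, and the keyword overrides are assumed SamplingParam attributes applied via sampling_param.update(kwargs):

from fastvideo.entrypoints.video_generator import VideoGenerator

generator = VideoGenerator.from_pretrained("path/to/model")  # placeholder

# Single prompt: kwargs override the pretrained SamplingParam defaults.
result = generator.generate_video(
    "A corgi surfing a wave at sunset",
    num_inference_steps=30,  # assumption: a SamplingParam attribute
    output_path="outputs/",  # placeholder directory
)

# Batch mode: one prompt per line; returns a list of per-prompt results.
results = generator.generate_video(prompt_path="prompts.txt")  # placeholder file

generator.shutdown()  # release executor resources when done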
fastvideo.entrypoints.video_generator.VideoGenerator.shutdown
shutdown()

Shutdown the video generator.

Source code in fastvideo/entrypoints/video_generator.py
def shutdown(self):
    """
    Shutdown the video generator.
    """
    self.executor.shutdown()
    del self.executor
fastvideo.entrypoints.video_generator.VideoGenerator.unmerge_lora_weights
unmerge_lora_weights() -> None

Use unmerged weights for inference to produce videos that align with validation videos generated during training.

Source code in fastvideo/entrypoints/video_generator.py
def unmerge_lora_weights(self) -> None:
    """
    Use unmerged weights for inference to produce videos that align with 
    validation videos generated during training.
    """
    self.executor.unmerge_lora_weights()
