From 2870a428520d0a54046ef10197932926bf169448 Mon Sep 17 00:00:00 2001
From: nateraw
Date: Thu, 20 Oct 2022 03:08:22 +0000
Subject: [PATCH 1/3] :pushpin: use latest diffusers

---
 requirements.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/requirements.txt b/requirements.txt
index 881a9fa..2681516 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,5 @@
 transformers
-diffusers==0.5.1
+diffusers==0.6.0
 scipy
 fire
 gradio

From 6974165d3d0cebcfb372c7f083a23534c60d2099 Mon Sep 17 00:00:00 2001
From: nateraw
Date: Thu, 20 Oct 2022 03:14:00 +0000
Subject: [PATCH 2/3] :bug: sample -> images

---
 stable_diffusion_videos/image_generation.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/stable_diffusion_videos/image_generation.py b/stable_diffusion_videos/image_generation.py
index 420b625..63f15ee 100644
--- a/stable_diffusion_videos/image_generation.py
+++ b/stable_diffusion_videos/image_generation.py
@@ -182,7 +182,7 @@ def generate_images(
         height=height,
         width=width,
         output_type="pil" if not upsample else "numpy",
-    )['sample']
+    )['images']
     if upsample:
         images = []
         for output in outputs:

From f29cb24090555589be6ab2cf4821c4d9721c51b2 Mon Sep 17 00:00:00 2001
From: nateraw
Date: Thu, 20 Oct 2022 03:16:06 +0000
Subject: [PATCH 3/3] :pushpin: pin release 0.6.0 and update readme

---
 README.md                           | 4 +---
 stable_diffusion_videos/__init__.py | 2 +-
 2 files changed, 2 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md
index b0ac2ac..3c1b6db 100644
--- a/README.md
+++ b/README.md
@@ -23,7 +23,7 @@ The app is built with [Gradio](https://gradio.app/), which allows you to interac
 
 2. Generate videos using the "Videos" tab
     - Using the images you found from the step above, provide the prompts/seeds you recorded
-    - Set the `num_walk_steps` - for testing you can use a small number like 3 or 5, but to get great results you'll want to use something larger (60-200 steps).
+    - Set the `num_interpolation_steps` - for testing you can use a small number like 3 or 5, but to get great results you'll want to use something larger (60-200 steps).
     - You can set the `output_dir` to the directory you wish to save to
 
 ## Python Package
@@ -51,7 +51,6 @@ import torch
 
 pipeline = StableDiffusionWalkPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
-    use_auth_token=True,
     torch_dtype=torch.float16,
     revision="fp16",
     scheduler=LMSDiscreteScheduler(
@@ -83,7 +82,6 @@ import torch
 
 pipeline = StableDiffusionWalkPipeline.from_pretrained(
     "CompVis/stable-diffusion-v1-4",
-    use_auth_token=True,
     torch_dtype=torch.float16,
     revision="fp16",
     scheduler=LMSDiscreteScheduler(
diff --git a/stable_diffusion_videos/__init__.py b/stable_diffusion_videos/__init__.py
index 8617b02..39fb9e0 100644
--- a/stable_diffusion_videos/__init__.py
+++ b/stable_diffusion_videos/__init__.py
@@ -114,4 +114,4 @@ def __dir__():
     },
 )
 
-__version__ = "0.5.3"
+__version__ = "0.6.0"
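
Taken together, the series pins `diffusers==0.6.0` and adapts to its API: the pipeline output no longer exposes its images under a `'sample'` key (the bug fixed in PATCH 2/3), and the README examples drop the explicit `use_auth_token=True`, relying instead on the token cached by `huggingface-cli login`. A minimal sketch of the post-patch usage against the upstream diffusers pipeline that this package wraps (the prompt and output path are illustrative; assumes a prior `huggingface-cli login` and a CUDA device):

```python
import torch
from diffusers import StableDiffusionPipeline

# With the cached login token, from_pretrained no longer needs
# use_auth_token=True (the argument dropped from the README in PATCH 3/3).
pipe = StableDiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    torch_dtype=torch.float16,
    revision="fp16",
).to("cuda")

output = pipe("a photo of an astronaut riding a horse")

# diffusers 0.6.0 keys the generated images under 'images';
# indexing with 'sample' as in 0.5.1 no longer works (PATCH 2/3).
images = output["images"]
images[0].save("astronaut.png")
```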