Generate an image from another image and a text description

Code:


# Source : https://colab.research.google.com/drive/1dlgggNa5Mz8sEAGU0wFCHhGLFooW_pf1?usp=sharing#scrollTo=JpjEKYlXXFd0

# !pip install transformers diffusers==0.2.4  # version pinned for now: a later diffusers update broke this notebook

# Parameters
# Input image URL
# input_image_url = 'https://lafeber.com/pet-birds/wp-content/uploads/2018/06/Scarlet-Macaw-2.jpg'
input_image_url = 'https://img.cutenesscdn.com/630x/clsd/getty/33e691dded7d4ddc87a83613469f897e?type=webp'


import torch
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, UNet2DConditionModel, LMSDiscreteScheduler
from tqdm.auto import tqdm
from torch import autocast
from PIL import Image
from matplotlib import pyplot as plt
import numpy
from torchvision import transforms as tfms

import urllib.request

# Set device
torch_device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the autoencoder, used to encode images into latents and to decode latents back into image space.
vae = AutoencoderKL.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="vae", use_auth_token=False)

# Load the tokenizer and text encoder to tokenize and encode the text. 
tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
text_encoder = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

# The UNet model that predicts the noise residual in latent space.
unet = UNet2DConditionModel.from_pretrained("CompVis/stable-diffusion-v1-4", subfolder="unet", use_auth_token=False)

# The noise scheduler
scheduler = LMSDiscreteScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000)
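# (These beta values match the noise schedule Stable Diffusion v1 was trained
# with, so the scheduler's sigmas line up with what the UNet expects.)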

# To the GPU we go!
vae = vae.to(torch_device)
text_encoder = text_encoder.to(torch_device)
unet = unet.to(torch_device)

# Using torchvision.transforms.ToTensor
to_tensor_tfm = tfms.ToTensor()
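# Note: ToTensor maps PIL pixels from [0, 255] to [0, 1]; the VAE expects
# inputs in [-1, 1], hence the *2-1 rescaling inside pil_to_latent below.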

def pil_to_latent(input_im):
  # Single image -> single latent in a batch (so shape 1, 4, 64, 64)
  with torch.no_grad():
    latent = vae.encode(to_tensor_tfm(input_im).unsqueeze(0).to(torch_device)*2-1) # note the [0,1] -> [-1,1] scaling
  return 0.18215 * latent.mode() # .mode() or .mean or .sample(); 0.18215 is SD's latent scaling factor
  # On newer diffusers versions use: return 0.18215 * latent.latent_dist.mode()

def latents_to_pil(latents):
  # Batch of latents -> list of PIL images
  latents = (1 / 0.18215) * latents # undo the scaling applied when encoding
  with torch.no_grad():
    image = vae.decode(latents)
    # On newer diffusers versions use: image = vae.decode(latents).sample
  image = (image / 2 + 0.5).clamp(0, 1) # [-1,1] -> [0,1]
  image = image.detach().cpu().permute(0, 2, 3, 1).numpy()
  images = (image * 255).round().astype("uint8")
  pil_images = [Image.fromarray(image) for image in images]
  return pil_images

# Download the input image
urllib.request.urlretrieve(input_image_url, "alter_input.jpg")

# Load the image with PIL and resize to Stable Diffusion's native 512x512
input_image = Image.open('alter_input.jpg').resize((512, 512))
# display(input_image)

# Encode to the latent space
encoded = pil_to_latent(input_image)
encoded.shape

# Decode this latent representation back into an image
decoded = latents_to_pil(encoded)[0]
decoded

# Setting the number of sampling steps:
scheduler.set_timesteps(15)

# See these in terms of the original 1000 steps used for training:
print(scheduler.timesteps)

# Look at the equivalent noise levels:
print(scheduler.sigmas)
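# For intuition, a small sketch pairing each sampling timestep with its noise
# level (sigma); with 15 steps this prints 15 pairs, from very noisy to clean:
for t_step, sigma in zip(scheduler.timesteps, scheduler.sigmas):
    print(f"t = {float(t_step):7.2f}   sigma = {float(sigma):.4f}")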

#@markdown Plotting this noise schedule:
# plt.plot(scheduler.sigmas)
# plt.title('Noise Schedule')
# plt.xlabel('Step')
# plt.ylabel('sigma')
# plt.show()

# View a noised version
noise = torch.randn_like(encoded) # Random noise
timestep = 150 # i.e. equivalent to that at 150/1000 training steps
encoded_and_noised = scheduler.add_noise(encoded, noise, timestep)
# display(latents_to_pil(encoded_and_noised)[0]) # Display
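# Under the hood, this scheduler family noises by simple addition:
#   noisy = clean + sigma * noise
# A manual sketch with a hypothetical sigma value, for illustration only:
sigma_demo = 5.0 # assumed noise level, not taken from the actual schedule
manually_noised = encoded + sigma_demo * noise
# display(latents_to_pil(manually_noised)[0]) # a heavily degraded version of the input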


#@title re-generate starting from a noised version of this image
prompt = ["A colorful dancer, nat geo photo"] #@param
height = 512                        # default height of Stable Diffusion
width = 512                         # default width of Stable Diffusion
num_inference_steps = 40  #@param           # Number of denoising steps
guidance_scale = 8                # Scale for classifier-free guidance
generator = torch.manual_seed(32)   # Seed generator to create the initial latent noise
batch_size = 1

# Prep text 
text_input = tokenizer(prompt, padding="max_length", max_length=tokenizer.model_max_length, truncation=True, return_tensors="pt")
with torch.no_grad():
  text_embeddings = text_encoder(text_input.input_ids.to(torch_device))[0]
max_length = text_input.input_ids.shape[-1]
uncond_input = tokenizer(
    [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
)
with torch.no_grad():
  uncond_embeddings = text_encoder(uncond_input.input_ids.to(torch_device))[0] 
text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
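# Sanity check: with classifier-free guidance the unconditional and conditional
# embeddings are stacked along the batch dimension, so it doubles.
print(text_embeddings.shape) # expected: torch.Size([2, 77, 768]) for CLIP ViT-L/14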

# Prep Scheduler
scheduler.set_timesteps(num_inference_steps)
print("timesteps :")
print(scheduler.timesteps)

# Start step
start_step = 4 #@param Explore ;)
start_sigma = scheduler.sigmas[start_step]
start_timestep = int(scheduler.timesteps[start_step])
print(f"start_timestep = {start_timestep}")

# Prep latents: noise the encoded image to the level matching start_step
# (add_noise already scales the noise by the correct sigma, so no further scaling is needed)
noise = torch.randn_like(encoded)
latents = scheduler.add_noise(encoded, noise, start_timestep)
# display(latents_to_pil(latents)[0])
latents = latents.to(torch_device)

# Loop
# with autocast("cuda"):
for i, t in tqdm(enumerate(scheduler.timesteps)):
    if i >= start_step: # skip the steps we already "performed" by noising the input image
      # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.
      latent_model_input = torch.cat([latents] * 2)
      sigma = scheduler.sigmas[i]
      latent_model_input = latent_model_input / ((sigma**2 + 1) ** 0.5)
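      # (This 1/sqrt(sigma**2 + 1) factor is the input scaling the LMS /
      # k-diffusion formulation expects; newer diffusers versions expose it
      # as scheduler.scale_model_input.)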

      # predict the noise residual
      with torch.no_grad():
        noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings)["sample"]

      # perform guidance
      noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
      noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
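      # guidance_scale = 1 would reproduce the text-conditioned prediction exactly;
      # larger values push further away from the unconditional prediction,
      # following the prompt more strongly at some cost in image fidelity.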

      # compute the previous noisy sample x_t -> x_t-1
      # (the pinned diffusers 0.2.4 LMS scheduler takes the step index i here; newer versions take the timestep t)
      latents = scheduler.step(noise_pred, i, latents)["prev_sample"]

latents_to_pil(latents)[0].save("alter.png")
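
# Optional: view input and result side by side (a small sketch; the
# "alter_comparison.png" filename is hypothetical, and both images are
# 512x512 as set above)
result = latents_to_pil(latents)[0]
comparison = Image.new("RGB", (width * 2, height))
comparison.paste(input_image, (0, 0))
comparison.paste(result, (width, 0))
comparison.save("alter_comparison.png")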

Input image:

Result: