# pipeline_animation.py

# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py

import inspect
from typing import Callable, List, Optional, Union
from dataclasses import dataclass

import numpy as np
import torch

from tqdm import tqdm
from diffusers.utils import is_accelerate_available
from packaging import version
from transformers import CLIPTextModel, CLIPTokenizer

from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from diffusers.utils import deprecate, logging, BaseOutput

from einops import rearrange

from ..models.unet import UNet3DConditionModel


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


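# Output container for AnimationPipeline.__call__: `videos` holds the decoded frames with
# shape (batch, channels, frames, height, width) and values in [0, 1], either as a
# torch.Tensor (the default `output_type="tensor"`) or as a NumPy array.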
@dataclass
class AnimationPipelineOutput(BaseOutput):
    videos: Union[torch.Tensor, np.ndarray]


class AnimationPipeline(DiffusionPipeline):
    _optional_components = []

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet3DConditionModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
            version.parse(unet.config._diffusers_version).base_version
        ) < version.parse("0.9.0.dev0")
        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)

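    # Memory helpers: VAE slicing decodes the batch in slices instead of all at once, and
    # sequential CPU offload (via `accelerate`) keeps the UNet, text encoder and VAE off
    # the GPU until each module is actually needed for a forward pass.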
    def enable_vae_slicing(self):
        self.vae.enable_slicing()

    def disable_vae_slicing(self):
        self.vae.disable_slicing()

    def enable_sequential_cpu_offload(self, gpu_id=0):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")

        device = torch.device(f"cuda:{gpu_id}")

        for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)

    @property
    def _execution_device(self):
        if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

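    # Tokenizes and encodes the prompt with CLIP, repeats the embeddings
    # `num_videos_per_prompt` times, and, when classifier-free guidance is enabled,
    # prepends the unconditional (negative-prompt) embeddings so that the conditional and
    # unconditional branches can share a single UNet forward pass.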
    def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt):
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )

        if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
            attention_mask = text_inputs.attention_mask.to(device)
        else:
            attention_mask = None

        text_embeddings = self.text_encoder(
            text_input_ids.to(device),
            attention_mask=attention_mask,
        )
        text_embeddings = text_embeddings[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            uncond_embeddings = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            uncond_embeddings = uncond_embeddings[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        return text_embeddings

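    # Maps denoised latents back to pixel space: undoes the Stable Diffusion VAE scaling
    # factor (0.18215) and decodes frame by frame to keep peak memory low. Returns a
    # float32 NumPy array of shape (batch, channels, frames, height, width) in [0, 1].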
    def decode_latents(self, latents):
        video_length = latents.shape[2]
        latents = 1 / 0.18215 * latents
        latents = rearrange(latents, "b c f h w -> (b f) c h w")
        # video = self.vae.decode(latents).sample
        video = []
        for frame_idx in tqdm(range(latents.shape[0])):
            video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample)
        video = torch.cat(video)
        video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length)
        video = (video / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        video = video.cpu().float().numpy()
        return video

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(self, prompt, height, width, callback_steps):
        if not isinstance(prompt, str) and not isinstance(prompt, list):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

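    # Draws (or validates) the initial noise of shape
    # (batch, channels, frames, height // vae_scale_factor, width // vae_scale_factor)
    # and scales it by the scheduler's `init_noise_sigma`.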
    def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None):
        shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )
        if latents is None:
            rand_device = "cpu" if device.type == "mps" else device

            if isinstance(generator, list):
                shape = shape
                # shape = (1,) + shape[1:]
                latents = [
                    torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype)
                    for i in range(batch_size)
                ]
                latents = torch.cat(latents, dim=0).to(device)
            else:
                latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device)
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}")
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

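    # Full text-to-video sampling loop: encode the prompt, set up timesteps and initial
    # latents, iteratively denoise with the 3D UNet (applying classifier-free guidance
    # when `guidance_scale > 1`), then decode the final latents into video frames.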
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        video_length: Optional[int],
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_videos_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "tensor",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: Optional[int] = 1,
        **kwargs,
    ):
        # Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # Check inputs. Raise error if not correct
        self.check_inputs(prompt, height, width, callback_steps)

        # Define call parameters
        # batch_size = 1 if isinstance(prompt, str) else len(prompt)
        batch_size = 1
        if latents is not None:
            batch_size = latents.shape[0]
        if isinstance(prompt, list):
            batch_size = len(prompt)

        device = self._execution_device
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # Encode input prompt
        prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size
        if negative_prompt is not None:
            negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size
        text_embeddings = self._encode_prompt(
            prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        # Prepare timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps = self.scheduler.timesteps

        # Prepare latent variables
        num_channels_latents = self.unet.in_channels
        latents = self.prepare_latents(
            batch_size * num_videos_per_prompt,
            num_channels_latents,
            video_length,
            height,
            width,
            text_embeddings.dtype,
            device,
            generator,
            latents,
        )
        latents_dtype = latents.dtype

        # Prepare extra step kwargs.
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample.to(dtype=latents_dtype)

                # perform guidance
                if do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        callback(i, t, latents)

        # Post-processing
        video = self.decode_latents(latents)

        # Convert to tensor
        if output_type == "tensor":
            video = torch.from_numpy(video)

        if not return_dict:
            return video

        return AnimationPipelineOutput(videos=video)
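

# ---------------------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file). Assumptions: the checkpoint path
# below is a placeholder for any Stable-Diffusion-style checkpoint with the usual
# diffusers subfolders, and the UNet is loaded with the repo's `from_pretrained_2d`
# helper inherited from Tune-A-Video. Run as a module (`python -m <package>....`) so the
# relative import of UNet3DConditionModel above resolves.
if __name__ == "__main__":
    from diffusers import DDIMScheduler

    pretrained_model_path = "runwayml/stable-diffusion-v1-5"  # placeholder path

    vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae")
    tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer")
    text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder")
    scheduler = DDIMScheduler.from_pretrained(pretrained_model_path, subfolder="scheduler")
    # Assumption: UNet3DConditionModel exposes a `from_pretrained_2d` classmethod that
    # inflates 2D UNet weights into the 3D UNet, as in Tune-A-Video.
    unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet")

    pipeline = AnimationPipeline(
        vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler
    ).to("cuda")

    output = pipeline(
        prompt="a corgi running on the beach",  # placeholder prompt
        video_length=16,
        height=512,
        width=512,
        num_inference_steps=25,
        guidance_scale=7.5,
        generator=torch.Generator("cuda").manual_seed(42),
    )
    video = output.videos  # (1, 3, 16, 512, 512), values in [0, 1]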