mirror of https://github.com/ostris/ai-toolkit.git
synced 2026-04-29 10:41:28 +00:00
Initial support for RamTorch. Still a WIP
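This change wires the new toolkit.memory_management.MemoryManager into the Qwen Image loader: when model_config.auto_memory is enabled, the manager is attached to both the transformer and the text encoder so a RamTorch-style offloader can keep weights in CPU RAM and stream them to the GPU on demand, rather than pinning the whole model in VRAM. Below is a minimal sketch of that idea; NaiveMemoryManager and its hook bodies are illustrative assumptions, not the actual toolkit.memory_management implementation.

import torch
import torch.nn as nn


class NaiveMemoryManager:
    """Hypothetical stand-in for toolkit.memory_management.MemoryManager."""

    @staticmethod
    def attach(model: nn.Module, device: torch.device) -> None:
        def pre_hook(mod, args):
            # stream this module's weights to the GPU just before it runs
            mod.to(device)

        def post_hook(mod, args, output):
            # park the weights back in CPU RAM to free VRAM for later modules
            mod.to("cpu")
            return output

        for mod in model.modules():
            if next(mod.parameters(recurse=False), None) is None:
                continue  # containers without their own weights need no hooks
            mod.register_forward_pre_hook(pre_hook)
            mod.register_forward_hook(post_hook)

Both attach calls in the diff are gated behind if self.model_config.auto_memory:, so the default load path is unchanged.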
@@ -9,63 +9,69 @@ from PIL import Image
 from toolkit.models.base_model import BaseModel
 from toolkit.basic import flush
 from toolkit.prompt_utils import PromptEmbeds
-from toolkit.samplers.custom_flowmatch_sampler import CustomFlowMatchEulerDiscreteScheduler
+from toolkit.samplers.custom_flowmatch_sampler import (
+    CustomFlowMatchEulerDiscreteScheduler,
+)
 from toolkit.accelerator import get_accelerator, unwrap_model
 from optimum.quanto import freeze, QTensor
 from toolkit.util.quantize import quantize, get_qtype, quantize_model
 import torch.nn.functional as F
+from toolkit.memory_management import MemoryManager
 from safetensors.torch import load_file

-from diffusers import QwenImagePipeline, QwenImageTransformer2DModel, AutoencoderKLQwenImage
-from transformers import Qwen2_5_VLForConditionalGeneration, Qwen2Tokenizer, Qwen2VLProcessor
+from diffusers import (
+    QwenImagePipeline,
+    QwenImageTransformer2DModel,
+    AutoencoderKLQwenImage,
+)
+from transformers import (
+    Qwen2_5_VLForConditionalGeneration,
+    Qwen2Tokenizer,
+    Qwen2VLProcessor,
+)
 from tqdm import tqdm

 if TYPE_CHECKING:
     from toolkit.data_transfer_object.data_loader import DataLoaderBatchDTO

 scheduler_config = {
     "base_image_seq_len": 256,
     "base_shift": 0.5,
     "invert_sigmas": False,
     "max_image_seq_len": 8192,
     "max_shift": 0.9,
     "num_train_timesteps": 1000,
     "shift": 1.0,
     "shift_terminal": 0.02,
     "stochastic_sampling": False,
     "time_shift_type": "exponential",
     "use_beta_sigmas": False,
     "use_dynamic_shifting": True,
     "use_exponential_sigmas": False,
-    "use_karras_sigmas": False
+    "use_karras_sigmas": False,
 }


 class QwenImageModel(BaseModel):
     arch = "qwen_image"
     _qwen_image_keep_visual = False
     _qwen_pipeline = QwenImagePipeline

     def __init__(
         self,
         device,
         model_config: ModelConfig,
-        dtype='bf16',
+        dtype="bf16",
         custom_pipeline=None,
         noise_scheduler=None,
-        **kwargs
+        **kwargs,
     ):
         super().__init__(
-            device,
-            model_config,
-            dtype,
-            custom_pipeline,
-            noise_scheduler,
-            **kwargs
+            device, model_config, dtype, custom_pipeline, noise_scheduler, **kwargs
         )
         self.is_flow_matching = True
         self.is_transformer = True
-        self.target_lora_modules = ['QwenImageTransformer2DModel']
+        self.target_lora_modules = ["QwenImageTransformer2DModel"]

     # static method to get the noise scheduler
     @staticmethod
@@ -73,40 +79,58 @@ class QwenImageModel(BaseModel):
         return CustomFlowMatchEulerDiscreteScheduler(**scheduler_config)

     def get_bucket_divisibility(self):
-        return 16 * 2 # 16 for the VAE, 2 for patch size
+        return 16 * 2  # 16 for the VAE, 2 for patch size

     def load_model(self):
         dtype = self.torch_dtype
         self.print_and_status_update("Loading Qwen Image model")
         model_path = self.model_config.name_or_path
         base_model_path = self.model_config.extras_name_or_path
+        model_dtype = dtype

-        transformer_path = model_path
-        transformer_subfolder = 'transformer'
-        if os.path.exists(transformer_path):
-            transformer_subfolder = None
-            transformer_path = os.path.join(transformer_path, 'transformer')
-            # check if the path is a full checkpoint.
-            te_folder_path = os.path.join(model_path, 'text_encoder')
-            # if we have the te, this folder is a full checkpoint, use it as the base
-            if os.path.exists(te_folder_path):
-                base_model_path = model_path
+        if base_model_path.endswith(".safetensors"):
+            # use the repo for extras
+            base_model_path = "Qwen/Qwen-Image"

         self.print_and_status_update("Loading transformer")
-        transformer = QwenImageTransformer2DModel.from_pretrained(
-            transformer_path,
-            subfolder=transformer_subfolder,
-            torch_dtype=dtype
-        )
+
+        if model_path.endswith(".safetensors"):
+            # load the safetensors file
+            transformer = QwenImageTransformer2DModel.from_single_file(
+                model_path,
+                config="Qwen/Qwen-Image",
+                subfolder="transformer",
+                torch_dtype=model_dtype,
+            )
+            transformer.to(model_dtype)
+
+        else:
+            transformer_path = model_path
+            transformer_subfolder = "transformer"
+            if os.path.exists(transformer_path):
+                transformer_subfolder = None
+                transformer_path = os.path.join(transformer_path, "transformer")
+                # check if the path is a full checkpoint.
+                te_folder_path = os.path.join(model_path, "text_encoder")
+                # if we have the te, this folder is a full checkpoint, use it as the base
+                if os.path.exists(te_folder_path):
+                    base_model_path = model_path
+
+            transformer = QwenImageTransformer2DModel.from_pretrained(
+                transformer_path, subfolder=transformer_subfolder, torch_dtype=dtype
+            )

         if self.model_config.quantize:
             self.print_and_status_update("Quantizing Transformer")
             quantize_model(self, transformer)
             flush()

+        if self.model_config.auto_memory:
+            MemoryManager.attach(transformer, self.device_torch)
+
         if self.model_config.low_vram:
             self.print_and_status_update("Moving transformer to CPU")
-            transformer.to('cpu')
+            transformer.to("cpu")

         flush()

@@ -117,32 +141,35 @@ class QwenImageModel(BaseModel):
         text_encoder = Qwen2_5_VLForConditionalGeneration.from_pretrained(
             base_model_path, subfolder="text_encoder", torch_dtype=dtype
         )

         # remove the visual model as it is not needed for image generation
         self.processor = None
         if not self._qwen_image_keep_visual:
             text_encoder.model.visual = None

+        if self.model_config.auto_memory:
+            MemoryManager.attach(text_encoder, self.device_torch)
+
         text_encoder.to(self.device_torch, dtype=dtype)
         flush()

         if self.model_config.quantize_te:
             self.print_and_status_update("Quantizing Text Encoder")
-            quantize(text_encoder, weights=get_qtype(
-                self.model_config.qtype_te))
+            quantize(text_encoder, weights=get_qtype(self.model_config.qtype_te))
             freeze(text_encoder)
             flush()

         self.print_and_status_update("Loading VAE")
         vae = AutoencoderKLQwenImage.from_pretrained(
-            base_model_path, subfolder="vae", torch_dtype=dtype)
+            base_model_path, subfolder="vae", torch_dtype=dtype
+        )

         self.noise_scheduler = QwenImageModel.get_train_scheduler()

         self.print_and_status_update("Making pipe")

         kwargs = {}

         if self._qwen_image_keep_visual:
             try:
                 self.processor = Qwen2VLProcessor.from_pretrained(
@@ -152,7 +179,7 @@ class QwenImageModel(BaseModel):
                 self.processor = Qwen2VLProcessor.from_pretrained(
                     base_model_path, subfolder="processor"
                 )
-            kwargs['processor'] = self.processor
+            kwargs["processor"] = self.processor

         pipe: QwenImagePipeline = self._qwen_pipeline(
             scheduler=self.noise_scheduler,
@@ -160,7 +187,7 @@ class QwenImageModel(BaseModel):
             tokenizer=tokenizer,
             vae=vae,
             transformer=None,
-            **kwargs
+            **kwargs,
         )
         # for quantization, it works best to do these after making the pipe
         pipe.text_encoder = text_encoder
@@ -198,7 +225,7 @@ class QwenImageModel(BaseModel):
             text_encoder=unwrap_model(self.text_encoder[0]),
             tokenizer=self.tokenizer[0],
             vae=unwrap_model(self.vae),
-            transformer=unwrap_model(self.transformer)
+            transformer=unwrap_model(self.transformer),
         )

         pipeline = pipeline.to(self.device_torch)
@@ -231,22 +258,27 @@ class QwenImageModel(BaseModel):

         # flush for low vram if we are doing that
         flush_between_steps = self.model_config.low_vram
-        # Fix a bug in diffusers/torch
+
+        # Fix a bug in diffusers/torch
         def callback_on_step_end(pipe, i, t, callback_kwargs):
             if flush_between_steps:
                 flush()
             latents = callback_kwargs["latents"]

             return {"latents": latents}

         sc = self.get_bucket_divisibility()
         gen_config.width = int(gen_config.width // sc * sc)
         gen_config.height = int(gen_config.height // sc * sc)
         img = pipeline(
             prompt_embeds=conditional_embeds.text_embeds,
-            prompt_embeds_mask=conditional_embeds.attention_mask.to(self.device_torch, dtype=torch.int64),
+            prompt_embeds_mask=conditional_embeds.attention_mask.to(
+                self.device_torch, dtype=torch.int64
+            ),
             negative_prompt_embeds=unconditional_embeds.text_embeds,
-            negative_prompt_embeds_mask=unconditional_embeds.attention_mask.to(self.device_torch, dtype=torch.int64),
+            negative_prompt_embeds_mask=unconditional_embeds.attention_mask.to(
+                self.device_torch, dtype=torch.int64
+            ),
             height=gen_config.height,
             width=gen_config.width,
             num_inference_steps=gen_config.num_inference_steps,
@@ -254,7 +286,7 @@ class QwenImageModel(BaseModel):
             latents=gen_config.latents,
             generator=generator,
             callback_on_step_end=callback_on_step_end,
-            **extra
+            **extra,
         ).images[0]
         return img

@@ -263,28 +295,36 @@ class QwenImageModel(BaseModel):
         latent_model_input: torch.Tensor,
         timestep: torch.Tensor,  # 0 to 1000 scale
         text_embeddings: PromptEmbeds,
-        **kwargs
+        **kwargs,
     ):
         self.model.to(self.device_torch)
         batch_size, num_channels_latents, height, width = latent_model_input.shape

         ps = self.transformer.config.patch_size

         # pack image tokens
-        latent_model_input = latent_model_input.view(batch_size, num_channels_latents, height // ps, ps, width // ps, ps)
+        latent_model_input = latent_model_input.view(
+            batch_size, num_channels_latents, height // ps, ps, width // ps, ps
+        )
         latent_model_input = latent_model_input.permute(0, 2, 4, 1, 3, 5)
-        latent_model_input = latent_model_input.reshape(batch_size, (height // ps) * (width // ps), num_channels_latents * (ps * ps))
+        latent_model_input = latent_model_input.reshape(
+            batch_size, (height // ps) * (width // ps), num_channels_latents * (ps * ps)
+        )

         # img_shapes passed to the model
         img_h2, img_w2 = height // ps, width // ps
         img_shapes = [[(1, img_h2, img_w2)]] * batch_size

         enc_hs = text_embeddings.text_embeds.to(self.device_torch, self.torch_dtype)
-        prompt_embeds_mask = text_embeddings.attention_mask.to(self.device_torch, dtype=torch.int64)
+        prompt_embeds_mask = text_embeddings.attention_mask.to(
+            self.device_torch, dtype=torch.int64
+        )
         txt_seq_lens = prompt_embeds_mask.sum(dim=1).tolist()

         noise_pred = self.transformer(
-            hidden_states=latent_model_input.to(self.device_torch, self.torch_dtype).detach(),
+            hidden_states=latent_model_input.to(
+                self.device_torch, self.torch_dtype
+            ).detach(),
             timestep=(timestep / 1000).detach(),
             guidance=None,
             encoder_hidden_states=enc_hs.detach(),
@@ -296,56 +336,55 @@ class QwenImageModel(BaseModel):
         )[0]

         # unpack
-        noise_pred = noise_pred.view(batch_size, height // ps, width // ps, num_channels_latents, ps, ps)
+        noise_pred = noise_pred.view(
+            batch_size, height // ps, width // ps, num_channels_latents, ps, ps
+        )
         noise_pred = noise_pred.permute(0, 3, 1, 4, 2, 5)
         noise_pred = noise_pred.reshape(batch_size, num_channels_latents, height, width)
         return noise_pred

     def get_prompt_embeds(self, prompt: str) -> PromptEmbeds:
         if self.pipeline.text_encoder.device != self.device_torch:
             self.pipeline.text_encoder.to(self.device_torch)

         prompt_embeds, prompt_embeds_mask = self.pipeline.encode_prompt(
             prompt,
             device=self.device_torch,
             num_images_per_prompt=1,
         )
-        pe = PromptEmbeds(
-            prompt_embeds
-        )
+        pe = PromptEmbeds(prompt_embeds)
         pe.attention_mask = prompt_embeds_mask
         return pe

     def get_model_has_grad(self):
         return False

     def get_te_has_grad(self):
         return False


     def save_model(self, output_path, meta, save_dtype):
         # only save the unet
         transformer: QwenImageTransformer2DModel = unwrap_model(self.model)
         transformer.save_pretrained(
-            save_directory=os.path.join(output_path, 'transformer'),
+            save_directory=os.path.join(output_path, "transformer"),
             safe_serialization=True,
         )

-        meta_path = os.path.join(output_path, 'aitk_meta.yaml')
-        with open(meta_path, 'w') as f:
+        meta_path = os.path.join(output_path, "aitk_meta.yaml")
+        with open(meta_path, "w") as f:
             yaml.dump(meta, f)

     def get_loss_target(self, *args, **kwargs):
-        noise = kwargs.get('noise')
-        batch = kwargs.get('batch')
+        noise = kwargs.get("noise")
+        batch = kwargs.get("batch")
         return (noise - batch.latents).detach()

     def get_base_model_version(self):
         return "qwen_image"

     def get_transformer_block_names(self) -> Optional[List[str]]:
-        return ['transformer_blocks']
-
+        return ["transformer_blocks"]

     def convert_lora_weights_before_save(self, state_dict):
         new_sd = {}
         for key, value in state_dict.items():
@@ -359,20 +398,15 @@ class QwenImageModel(BaseModel):
             new_key = key.replace("diffusion_model.", "transformer.")
             new_sd[new_key] = value
         return new_sd

-    def encode_images(
-        self,
-        image_list: List[torch.Tensor],
-        device=None,
-        dtype=None
-    ):
+    def encode_images(self, image_list: List[torch.Tensor], device=None, dtype=None):
         if device is None:
             device = self.vae_device_torch
         if dtype is None:
             dtype = self.vae_torch_dtype

         # Move to vae to device if on cpu
-        if self.vae.device == 'cpu':
+        if self.vae.device == "cpu":
             self.vae.to(device)
         self.vae.eval()
         self.vae.requires_grad_(False)
@@ -383,20 +417,19 @@ class QwenImageModel(BaseModel):

         images = images.unsqueeze(2)
         latents = self.vae.encode(images).latent_dist.sample()

         latents_mean = (
             torch.tensor(self.vae.config.latents_mean)
             .view(1, self.vae.config.z_dim, 1, 1, 1)
             .to(latents.device, latents.dtype)
         )
-        latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(1, self.vae.config.z_dim, 1, 1, 1).to(
-            latents.device, latents.dtype
-        )
+        latents_std = 1.0 / torch.tensor(self.vae.config.latents_std).view(
+            1, self.vae.config.z_dim, 1, 1, 1
+        ).to(latents.device, latents.dtype)

         latents = (latents - latents_mean) * latents_std
         latents = latents.to(device, dtype=dtype)
-

         latents = latents.squeeze(2)  # remove the frame count dimension

         return latents
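Aside: the view/permute/reshape chains this commit re-wraps in get_noise_prediction are a standard patchify round trip; the latent grid is packed into one token per ps x ps patch before the transformer and unpacked afterwards. A self-contained sketch with made-up shapes, independent of the toolkit:

import torch

bsz, ch, h, w, ps = 1, 16, 8, 8, 2  # batch, latent channels, height, width, patch size

latents = torch.randn(bsz, ch, h, w)

# pack: (B, C, H, W) -> (B, (H/ps)*(W/ps), C*ps*ps), one token per ps x ps patch
packed = (
    latents.view(bsz, ch, h // ps, ps, w // ps, ps)
    .permute(0, 2, 4, 1, 3, 5)
    .reshape(bsz, (h // ps) * (w // ps), ch * ps * ps)
)

# unpack: invert the permutation to recover (B, C, H, W)
unpacked = (
    packed.view(bsz, h // ps, w // ps, ch, ps, ps)
    .permute(0, 3, 1, 4, 2, 5)
    .reshape(bsz, ch, h, w)
)

assert torch.equal(latents, unpacked)  # exact round trip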