mirror of
https://github.com/lllyasviel/stable-diffusion-webui-forge.git
synced 2026-03-09 06:59:48 +00:00
Cnet (#20)
* ini * i * i * i * i * i * i * Update preprocessor_normalbae.py * i * i * i * i * Update README.md * i * Update preprocessor_depth.py * Update shared.py * Update devices.py * i * i * i * i * i * i * Update README.md * i * Update README.md * i * Update annotator_path.py * i * i * Update preprocessor_meta.py
This commit is contained in:
@@ -0,0 +1,72 @@
|
||||
from modules_forge.shared import Preprocessor, PreprocessorParameter, preprocessor_dir, load_file_from_url, add_preprocessor
|
||||
from modules_forge.forge_util import resize_image_with_pad
|
||||
|
||||
import types
|
||||
import torch
|
||||
import numpy as np
|
||||
|
||||
from einops import rearrange
|
||||
from annotator.normalbae.models.NNET import NNET
|
||||
from annotator.normalbae import load_checkpoint
|
||||
from torchvision import transforms
|
||||
|
||||
|
||||
class PreprocessorNormalBae(Preprocessor):
    """Surface-normal-map preprocessor backed by the NormalBae (NNET) annotator.

    Exposes a single visible "Resolution" slider; the three generic sliders
    of the base Preprocessor UI stay hidden. The NNET checkpoint is fetched
    lazily on first use and cached via the forge model-patcher machinery.
    """

    def __init__(self):
        super().__init__()
        self.name = 'normalbae'
        self.tag = 'NormalMap'
        # Only the resolution control is meaningful for this annotator.
        self.slider_resolution = PreprocessorParameter(
            label='Resolution', minimum=128, maximum=2048, value=512, step=8, visible=True)
        self.slider_1 = PreprocessorParameter(visible=False)
        self.slider_2 = PreprocessorParameter(visible=False)
        self.slider_3 = PreprocessorParameter(visible=False)
        self.show_control_mode = True
        self.do_not_need_model = False

    def load_model(self):
        """Download and initialise the NNET checkpoint once; later calls no-op."""
        if self.model_patcher is not None:
            return

        ckpt_path = load_file_from_url(
            "https://huggingface.co/lllyasviel/Annotators/resolve/main/scannet.pt",
            model_dir=preprocessor_dir)

        # NNET expects an argparse-like namespace; these mirror the
        # scannet-pretrained configuration used by the upstream annotator.
        config = types.SimpleNamespace(
            mode='client',
            architecture='BN',
            pretrained='scannet',
            sampling_ratio=0.4,
            importance_ratio=0.7,
        )
        net = load_checkpoint(ckpt_path, NNET(config))

        # Standard ImageNet normalization, applied to inputs at inference time.
        self.norm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

        self.model_patcher = self.setup_model_patcher(net)

    def __call__(self, input_image, resolution, slider_1=None, slider_2=None, slider_3=None, **kwargs):
        """Run NormalBae on an HWC uint8 image and return an HWC uint8 normal map.

        slider_1/2/3 are accepted for interface compatibility but unused.
        """
        input_image, remove_pad = resize_image_with_pad(input_image, resolution)

        self.load_model()
        self.move_all_model_patchers_to_gpu()

        assert input_image.ndim == 3

        with torch.no_grad():
            x = self.send_tensor_to_model_device(torch.from_numpy(input_image))
            x = rearrange(x / 255.0, 'h w c -> 1 c h w')
            x = self.norm(x)

            prediction = self.model_patcher.model(x)
            # Keep the xyz channels of the last-stage output and remap
            # component range [-1, 1] -> [0, 1].
            normal = ((prediction[0][-1][:, :3] + 1) * 0.5).clip(0, 1)

            hwc = rearrange(normal[0], 'c h w -> h w c').cpu().numpy()
            normal_image = (hwc * 255.0).clip(0, 255).astype(np.uint8)

        return remove_pad(normal_image)
|
||||
|
||||
|
||||
# Register this preprocessor with the global forge preprocessor registry
# at import time (the registry instantiates the class as needed).
add_preprocessor(PreprocessorNormalBae)
|
||||
Reference in New Issue
Block a user