Mirror of https://github.com/Haoming02/sd-webui-old-photo-restoration.git (synced 2026-01-26 19:29:52 +00:00)
Commit history (all by Haoming <hmstudy02@gmail.com>):
- cd7a9c103d1ea981ecd236d4e9111fd3c1cd6c2b (Tue Dec 19 11:33:44 2023 +0800): add README
- 30127cbb2a8e5f461c540729dc7ad457f66eb94c (Tue Dec 19 11:12:16 2023 +0800): fix Face Enhancement distortion
- 6d52de5368c6cfbd9342465b5238725c186e00b9 (Mon Dec 18 18:27:25 2023 +0800): better? args handling
- 0d1938b59eb77a038ee0a91a66b07fb9d7b3d6d4 (Mon Dec 18 17:40:19 2023 +0800): bug fix related to Scratch
- 8315cd05ffeb2d651b4c57d70bf04b413ca8901d (Mon Dec 18 17:24:52 2023 +0800): implement step 2 ~ 4
- a5feb04b3980bdd80c6b012a94c743ba48cdfe39 (Mon Dec 18 11:55:20 2023 +0800): process scratch
- 3b18f7b042 (Wed Dec 13 11:57:20 2023 +0800): "init"
- d0148e0e82 (Wed Dec 13 10:34:39 2023 +0800): clone repo
71 lines · 2.4 KiB · Python
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.

import torch
import torch.nn.parallel
import numpy as np
import torch.nn as nn
import torch.nn.functional as F


class Downsample(nn.Module):
    # Anti-aliased downsampling ("blur-pool"): low-pass filter each channel
    # with a fixed binomial kernel, then subsample with the given stride.
    # https://github.com/adobe/antialiased-cnns

    def __init__(self, pad_type="reflect", filt_size=3, stride=2, channels=None, pad_off=0):
        super(Downsample, self).__init__()
        self.filt_size = filt_size
        self.pad_off = pad_off
        # Symmetric (left/right, top/bottom) padding so the blurred output has
        # the same spatial size as plain strided subsampling would produce.
        self.pad_sizes = [
            int(1.0 * (filt_size - 1) / 2),
            int(np.ceil(1.0 * (filt_size - 1) / 2)),
            int(1.0 * (filt_size - 1) / 2),
            int(np.ceil(1.0 * (filt_size - 1) / 2)),
        ]
        self.pad_sizes = [pad_size + pad_off for pad_size in self.pad_sizes]
        self.stride = stride
        self.off = int((self.stride - 1) / 2.0)
        self.channels = channels

        # 1-D binomial filter coefficients (rows of Pascal's triangle).
        # print('Filter size [%i]'%filt_size)
        if self.filt_size == 1:
            a = np.array([1.0,])
        elif self.filt_size == 2:
            a = np.array([1.0, 1.0])
        elif self.filt_size == 3:
            a = np.array([1.0, 2.0, 1.0])
        elif self.filt_size == 4:
            a = np.array([1.0, 3.0, 3.0, 1.0])
        elif self.filt_size == 5:
            a = np.array([1.0, 4.0, 6.0, 4.0, 1.0])
        elif self.filt_size == 6:
            a = np.array([1.0, 5.0, 10.0, 10.0, 5.0, 1.0])
        elif self.filt_size == 7:
            a = np.array([1.0, 6.0, 15.0, 20.0, 15.0, 6.0, 1.0])

        # Outer product gives the 2-D filter; normalize it to sum to 1, then
        # register one copy per channel for a depthwise (grouped) convolution.
        filt = torch.Tensor(a[:, None] * a[None, :])
        filt = filt / torch.sum(filt)
        self.register_buffer("filt", filt[None, None, :, :].repeat((self.channels, 1, 1, 1)))

        self.pad = get_pad_layer(pad_type)(self.pad_sizes)

    def forward(self, inp):
        if self.filt_size == 1:
            # No blurring: just (optionally padded) strided subsampling.
            if self.pad_off == 0:
                return inp[:, :, :: self.stride, :: self.stride]
            else:
                return self.pad(inp)[:, :, :: self.stride, :: self.stride]
        else:
            # Depthwise convolution: each channel is blurred independently with
            # the same filter, then subsampled by the stride.
            return F.conv2d(self.pad(inp), self.filt, stride=self.stride, groups=inp.shape[1])


def get_pad_layer(pad_type):
    # Map a padding-mode name to the corresponding torch.nn padding layer.
    # NOTE: an unrecognized pad_type only prints a warning and falls through,
    # so the return below raises NameError in that case.
    if pad_type in ["refl", "reflect"]:
        PadLayer = nn.ReflectionPad2d
    elif pad_type in ["repl", "replicate"]:
        PadLayer = nn.ReplicationPad2d
    elif pad_type == "zero":
        PadLayer = nn.ZeroPad2d
    else:
        print("Pad type [%s] not recognized" % pad_type)
    return PadLayer
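As a rough usage sketch (not taken from this repository; the Conv2d/ReLU block and the tensor shapes are illustrative assumptions): Downsample is typically dropped in where a strided pooling or convolution would otherwise subsample, so the feature map is low-pass filtered before the stride is applied. With filt_size=3 the blur kernel is the normalized outer product of [1, 2, 1] with itself.

# Hedged usage sketch; the surrounding layers and shapes are assumptions,
# not code from this file. Downsample and get_pad_layer come from the module above.
import torch
import torch.nn as nn

channels = 64
block = nn.Sequential(
    nn.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1),
    nn.ReLU(inplace=True),
    # Blur, then subsample by 2 instead of using a strided conv/pool directly.
    Downsample(pad_type="reflect", filt_size=3, stride=2, channels=channels),
)

x = torch.randn(1, channels, 32, 32)   # (batch, channels, height, width)
y = block(x)
print(y.shape)                         # torch.Size([1, 64, 16, 16])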