
- GIF Support
- GPU Support
- Multiple GPU Support
- CLI Arguments
- Rook Implementation
master
Iván Bravo Bravo 3 years ago
parent
commit
562e740ea1
  1. .env.example (2)
  2. .gitignore (28)
  3. LICENSE.md (0)
  4. gan.py (515)
  5. main.py (116)
  6. opencv_transform/__pycache__/__init__.cpython-36.pyc (BIN)
  7. opencv_transform/__pycache__/annotation.cpython-36.pyc (BIN)
  8. opencv_transform/__pycache__/dress_to_correct.cpython-36.pyc (BIN)
  9. opencv_transform/__pycache__/mask_to_maskref.cpython-36.pyc (BIN)
  10. opencv_transform/__pycache__/maskdet_to_maskfin.cpython-36.pyc (BIN)
  11. opencv_transform/__pycache__/nude_to_watermark.cpython-36.pyc (BIN)
  12. opencv_transform/maskdet_to_maskfin.py (12)
  13. opencv_transform/nude_to_watermark.py (33)
  14. requirements-generic.txt (10)
  15. requirements-mac.txt (14)
  16. requirements-ubuntu.txt (10)
  17. requirements-windows.txt (10)
  18. run.py (321)

2
.env.example

@@ -0,0 +1,2 @@
SENTRY_DSN =
ROOKOUT_TOKEN =
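The two keys above are read at startup by main.py through python-dotenv. A minimal sketch of that flow, assuming a .env file in the working directory (the prints are illustrative; the variable names come from the file above):

# Sketch: how main.py's start_sentry()/start_rook() consume these keys.
# load_dotenv() copies .env entries into the process environment;
# os.getenv returns None for unset keys, so both integrations stay off
# unless a value is provided.
import os
from dotenv import load_dotenv

load_dotenv()
dsn = os.getenv("SENTRY_DSN")
token = os.getenv("ROOKOUT_TOKEN")
print("Sentry enabled:", bool(dsn))
print("Rookout enabled:", bool(token))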

28
.gitignore vendored

@@ -1 +1,27 @@
checkpoints/*.lib
# Logs
logs
*.log
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Cache
__pycache__
# dotenv environment variables file
.env
# Checkpoints
checkpoints/
# Build
build/
# Build Configuration
cli.spec
# Output file
output.png

0
license.txt → LICENSE.md

515
gan.py

@@ -6,231 +6,364 @@ import torch
import io
import os
import functools
from collections import OrderedDict
class DataLoader():
def __init__(self, opt, cv_img):
super(DataLoader, self).__init__()
class DataLoader:
def __init__(self, opt, cv_img):
super(DataLoader, self).__init__()
self.dataset = Dataset()
self.dataset.initialize(opt, cv_img)
self.dataset = Dataset()
self.dataset.initialize(opt, cv_img)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batchSize,
shuffle=not opt.serial_batches,
num_workers=int(opt.nThreads))
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=opt.batchSize,
shuffle=not opt.serial_batches,
num_workers=int(opt.nThreads),
)
def load_data(self):
return self.dataloader
def load_data(self):
return self.dataloader
def __len__(self):
return 1
def __len__(self):
return 1
class Dataset(torch.utils.data.Dataset):
def __init__(self):
super(Dataset, self).__init__()
def __init__(self):
super(Dataset, self).__init__()
def initialize(self, opt, cv_img):
self.opt = opt
self.root = opt.dataroot
def initialize(self, opt, cv_img):
self.opt = opt
self.root = opt.dataroot
self.A = Image.fromarray(cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB))
self.dataset_size = 1
self.A = Image.fromarray(cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB))
self.dataset_size = 1
def __getitem__(self, index):
def __getitem__(self, index):
transform_A = get_transform(self.opt)
A_tensor = transform_A(self.A.convert('RGB'))
transform_A = get_transform(self.opt)
A_tensor = transform_A(self.A.convert("RGB"))
B_tensor = inst_tensor = feat_tensor = 0
B_tensor = inst_tensor = feat_tensor = 0
input_dict = {'label': A_tensor, 'inst': inst_tensor, 'image': B_tensor,
'feat': feat_tensor, 'path': ""}
input_dict = {
"label": A_tensor,
"inst": inst_tensor,
"image": B_tensor,
"feat": feat_tensor,
"path": "",
}
return input_dict
return input_dict
def __len__(self):
return 1
def __len__(self):
return 1
class DeepModel(torch.nn.Module):
def initialize(self, opt, gpu_ids):
self.opt = opt
if gpu_ids is None:
self.gpu_ids = []
else:
self.gpu_ids = gpu_ids
self.netG = self.__define_G(
opt.input_nc,
opt.output_nc,
opt.ngf,
opt.netG,
opt.n_downsample_global,
opt.n_blocks_global,
opt.n_local_enhancers,
opt.n_blocks_local,
opt.norm,
self.gpu_ids,
)
# load networks
self.__load_network(self.netG)
def inference(self, label, inst):
# Encode Inputs
input_label, inst_map, _, _ = self.__encode_input(label, inst, infer=True)
# Fake Generation
input_concat = input_label
with torch.no_grad():
fake_image = self.netG.forward(input_concat)
return fake_image
# helper loading function that can be used by subclasses
def __load_network(self, network):
save_path = os.path.join(self.opt.checkpoints_dir)
state_dict = torch.load(save_path)
if len(self.gpu_ids) > 1:
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = "module." + k # add `module.`
new_state_dict[name] = v
else:
new_state_dict = state_dict
network.load_state_dict(new_state_dict)
def __encode_input(
self, label_map, inst_map=None, real_image=None, feat_map=None, infer=False
):
if len(self.gpu_ids) > 0:
input_label = label_map.data.cuda() # GPU
else:
input_label = label_map.data # CPU
return input_label, inst_map, real_image, feat_map
def __weights_init(self, m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find("BatchNorm2d") != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def __define_G(
self,
input_nc,
output_nc,
ngf,
netG,
n_downsample_global=3,
n_blocks_global=9,
n_local_enhancers=1,
n_blocks_local=3,
norm="instance",
gpu_ids=[],
):
norm_layer = self.__get_norm_layer(norm_type=norm)
# model
netG = GlobalGenerator(
input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, norm_layer
)
if len(gpu_ids) > 1:
# print(
# "Using",
# len(gpu_ids),
# "of",
# torch.cuda.device_count(),
# "GPUs available.",
# )
netG = torch.nn.DataParallel(netG, gpu_ids)
if len(gpu_ids) > 0:
netG.cuda(gpu_ids[0])
netG.apply(self.__weights_init)
return netG
def __get_norm_layer(self, norm_type="instance"):
norm_layer = functools.partial(torch.nn.InstanceNorm2d, affine=False)
return norm_layer
def initialize(self, opt):
self.opt = opt
self.gpu_ids = [] #FIX CPU
self.netG = self.__define_G(opt.input_nc, opt.output_nc, opt.ngf, opt.netG,
opt.n_downsample_global, opt.n_blocks_global, opt.n_local_enhancers,
opt.n_blocks_local, opt.norm, self.gpu_ids)
# load networks
self.__load_network(self.netG)
def inference(self, label, inst):
# Encode Inputs
input_label, inst_map, _, _ = self.__encode_input(label, inst, infer=True)
# Fake Generation
input_concat = input_label
with torch.no_grad():
fake_image = self.netG.forward(input_concat)
return fake_image
# helper loading function that can be used by subclasses
def __load_network(self, network):
save_path = os.path.join(self.opt.checkpoints_dir)
network.load_state_dict(torch.load(save_path))
def __encode_input(self, label_map, inst_map=None, real_image=None, feat_map=None, infer=False):
if (len(self.gpu_ids) > 0):
input_label = label_map.data.cuda() #GPU
else:
input_label = label_map.data #CPU
return input_label, inst_map, real_image, feat_map
def __weights_init(self, m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm2d') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def __define_G(self, input_nc, output_nc, ngf, netG, n_downsample_global=3, n_blocks_global=9, n_local_enhancers=1,
n_blocks_local=3, norm='instance', gpu_ids=[]):
norm_layer = self.__get_norm_layer(norm_type=norm)
netG = GlobalGenerator(input_nc, output_nc, ngf, n_downsample_global, n_blocks_global, norm_layer)
if len(gpu_ids) > 0:
netG.cuda(gpu_ids[0])
netG.apply(self.__weights_init)
return netG
def __get_norm_layer(self, norm_type='instance'):
norm_layer = functools.partial(torch.nn.InstanceNorm2d, affine=False)
return norm_layer
##############################################################################
# Generator
##############################################################################
class GlobalGenerator(torch.nn.Module):
def __init__(self, input_nc, output_nc, ngf=64, n_downsampling=3, n_blocks=9, norm_layer=torch.nn.BatchNorm2d,
padding_type='reflect'):
assert(n_blocks >= 0)
super(GlobalGenerator, self).__init__()
activation = torch.nn.ReLU(True)
model = [torch.nn.ReflectionPad2d(3), torch.nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0), norm_layer(ngf), activation]
### downsample
for i in range(n_downsampling):
mult = 2**i
model += [torch.nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
norm_layer(ngf * mult * 2), activation]
### resnet blocks
mult = 2**n_downsampling
for i in range(n_blocks):
model += [ResnetBlock(ngf * mult, padding_type=padding_type, activation=activation, norm_layer=norm_layer)]
### upsample
for i in range(n_downsampling):
mult = 2**(n_downsampling - i)
model += [torch.nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1),
norm_layer(int(ngf * mult / 2)), activation]
model += [torch.nn.ReflectionPad2d(3), torch.nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), torch.nn.Tanh()]
self.model = torch.nn.Sequential(*model)
def forward(self, input):
return self.model(input)
def __init__(
self,
input_nc,
output_nc,
ngf=64,
n_downsampling=3,
n_blocks=9,
norm_layer=torch.nn.BatchNorm2d,
padding_type="reflect",
):
assert n_blocks >= 0
super(GlobalGenerator, self).__init__()
activation = torch.nn.ReLU(True)
# activation = torch.nn.DataParallel(activation)
model = [
torch.nn.ReflectionPad2d(3),
torch.nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),
norm_layer(ngf),
activation,
]
# downsample
for i in range(n_downsampling):
mult = 2 ** i
model += [
torch.nn.Conv2d(
ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1
),
norm_layer(ngf * mult * 2),
activation,
]
# resnet blocks
mult = 2 ** n_downsampling
for i in range(n_blocks):
model += [
ResnetBlock(
ngf * mult,
padding_type=padding_type,
activation=activation,
norm_layer=norm_layer,
)
]
# upsample
for i in range(n_downsampling):
mult = 2 ** (n_downsampling - i)
model += [
torch.nn.ConvTranspose2d(
ngf * mult,
int(ngf * mult / 2),
kernel_size=3,
stride=2,
padding=1,
output_padding=1,
),
norm_layer(int(ngf * mult / 2)),
activation,
]
model += [
torch.nn.ReflectionPad2d(3),
torch.nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0),
torch.nn.Tanh(),
]
self.model = torch.nn.Sequential(*model)
# self.model = torch.nn.DataParallel(self.model)
def forward(self, input):
return self.model(input)
# Define a resnet block
class ResnetBlock(torch.nn.Module):
def __init__(self, dim, padding_type, norm_layer, activation=torch.nn.ReLU(True), use_dropout=False):
super(ResnetBlock, self).__init__()
self.conv_block = self.__build_conv_block(dim, padding_type, norm_layer, activation, use_dropout)
def __build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout):
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [torch.nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [torch.nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [torch.nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim),
activation]
if use_dropout:
conv_block += [torch.nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [torch.nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [torch.nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [torch.nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim)]
return torch.nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
def __init__(
self,
dim,
padding_type,
norm_layer,
activation=torch.nn.ReLU(True),
use_dropout=False,
):
super(ResnetBlock, self).__init__()
self.conv_block = self.__build_conv_block(
dim, padding_type, norm_layer, activation, use_dropout
)
def __build_conv_block(
self, dim, padding_type, norm_layer, activation, use_dropout
):
conv_block = []
p = 0
if padding_type == "reflect":
conv_block += [torch.nn.ReflectionPad2d(1)]
elif padding_type == "replicate":
conv_block += [torch.nn.ReplicationPad2d(1)]
elif padding_type == "zero":
p = 1
else:
raise NotImplementedError("padding [%s] is not implemented" % padding_type)
conv_block += [
torch.nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim),
activation,
]
if use_dropout:
conv_block += [torch.nn.Dropout(0.5)]
p = 0
if padding_type == "reflect":
conv_block += [torch.nn.ReflectionPad2d(1)]
elif padding_type == "replicate":
conv_block += [torch.nn.ReplicationPad2d(1)]
elif padding_type == "zero":
p = 1
else:
raise NotImplementedError("padding [%s] is not implemented" % padding_type)
conv_block += [
torch.nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim),
]
return torch.nn.Sequential(*conv_block)
def forward(self, x):
out = x + self.conv_block(x)
return out
# Data utils:
def get_transform(opt, method=Image.BICUBIC, normalize=True):
transform_list = []
transform_list = []
base = float(2 ** opt.n_downsample_global)
if opt.netG == 'local':
base *= (2 ** opt.n_local_enhancers)
transform_list.append(transforms.Lambda(lambda img: __make_power_2(img, base, method)))
base = float(2 ** opt.n_downsample_global)
if opt.netG == "local":
base *= 2 ** opt.n_local_enhancers
transform_list.append(
transforms.Lambda(lambda img: __make_power_2(img, base, method))
)
transform_list += [transforms.ToTensor()]
transform_list += [transforms.ToTensor()]
if normalize:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
if normalize:
transform_list += [transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
def __make_power_2(img, base, method=Image.BICUBIC):
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if (h == oh) and (w == ow):
return img
return img.resize((w, h), method)
ow, oh = img.size
h = int(round(oh / base) * base)
w = int(round(ow / base) * base)
if (h == oh) and (w == ow):
return img
return img.resize((w, h), method)
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2im(image_tensor, imtype=np.uint8, normalize=True):
if isinstance(image_tensor, list):
image_numpy = []
for i in range(len(image_tensor)):
image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))
return image_numpy
image_numpy = image_tensor.cpu().float().numpy()
if normalize:
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
else:
image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
image_numpy = np.clip(image_numpy, 0, 255)
if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3:
image_numpy = image_numpy[:,:,0]
return image_numpy.astype(imtype)
if isinstance(image_tensor, list):
image_numpy = []
for i in range(len(image_tensor)):
image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))
return image_numpy
image_numpy = image_tensor.cpu().float().numpy()
if normalize:
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
else:
image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
image_numpy = np.clip(image_numpy, 0, 255)
if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3:
image_numpy = image_numpy[:, :, 0]
return image_numpy.astype(imtype)
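A note on the get_transform/__make_power_2 pair above: GlobalGenerator halves the spatial resolution once per downsampling layer and restores it with ConvTranspose2d, so each image side must be a multiple of 2 ** n_downsample_global (4 in run.py's Options, hence a base of 16). A toy sketch of the rounding, with illustrative sizes and an added floor guard that is not in the commit:

# Each stride-2 layer halves H and W; rounding each side to the nearest
# multiple of base = 2**n keeps encoder and decoder shapes aligned.
def round_to_multiple(size, base):
    return max(base, int(round(size / base) * base))  # floor guard is an addition

base = 2 ** 4  # n_downsample_global = 4
for w, h in [(500, 375), (512, 512), (1021, 768)]:
    print((w, h), "->", (round_to_multiple(w, base), round_to_multiple(h, base)))
# (500, 375) -> (496, 368); (512, 512) unchanged; (1021, 768) -> (1024, 768)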

116
main.py

@@ -1,7 +1,53 @@
import shutil
import sys
import argparse
import tempfile
import cv2
import time
import os
import imageio
import sentry_sdk
import rook
from run import process
from run import process, process_gif
from multiprocessing import freeze_support
from dotenv import load_dotenv
#
load_dotenv()
parser = argparse.ArgumentParser()
parser.add_argument(
"-i", "--input", default="input.png", help="path of the photo to transform"
)
parser.add_argument(
"-o",
"--output",
default="output.png",
help="path where the transformed photo will be saved. (default: output.png or output.gif)",
)
parser.add_argument(
"--cpu",
default=False,
action="store_true",
help="force photo processing with CPU (slower)",
)
parser.add_argument(
"--gpu",
action="append",
type=int,
help="ID of the GPU to use for processing. It can be used multiple times to specify multiple GPUs (Example: --gpu 0 --gpu 1 --gpu 2) This argument will be ignored if --cpu is active. (default: 0)",
)
parser.add_argument(
"--enablepubes",
action="store_true",
default=False,
help="generates pubic hair on output image",
)
parser.add_argument(
"--gif", action="store_true", default=False, help="Run the processing on a gif"
)
args = parser.parse_args()
"""
main.py
@@ -13,18 +59,66 @@ main.py
# ------------------------------------------------- main()
def main():
start = time.time()
gpu_ids = args.gpu
if args.cpu:
gpu_ids = None
elif gpu_ids is None:
gpu_ids = [0]
if not args.gif:
# Read input image
image = cv2.imread(args.input)
# Process
result = process(image, gpu_ids, args.enablepubes)
# Write output image
cv2.imwrite(args.output, result)
else:
gif_imgs = imageio.mimread(args.input)
nums = len(gif_imgs)
print("Total {} frames in the gif!".format(nums))
tmp_dir = tempfile.mkdtemp()
process_gif(gif_imgs, gpu_ids, args.enablepubes, tmp_dir)
print("Creating gif")
imageio.mimsave(
args.output if args.output != "output.png" else "output.gif",
[
imageio.imread(os.path.join(tmp_dir, "output_{}.jpg".format(i)))
for i in range(nums)
],
)
shutil.rmtree(tmp_dir)
end = time.time()
duration = end - start
# Done
print("Done! We have taken", round(duration, 2), "seconds")
# Exit
sys.exit()
def start_sentry():
dsn = os.getenv("SENTRY_DSN")
if dsn:
sentry_sdk.init(dsn=dsn)
#Read input image
dress = cv2.imread("input.png")
#Process
watermark = process(dress)
def start_rook():
token = os.getenv("ROOKOUT_TOKEN")
# Write output image
cv2.imwrite("output.png", watermark)
if token:
rook.start(token=token)
#Exit
sys.exit()
if __name__ == '__main__':
main()
if __name__ == "__main__":
freeze_support()
start_sentry()
start_rook()
main()
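On the --gpu flag defined above: main() resolves gpu_ids by forcing None when --cpu is set and falling back to [0] when --gpu was never passed. That fallback works because argparse's action="append" leaves the attribute as None until the flag first appears, then accumulates one int per occurrence. A toy sketch (parser and values illustrative; the behavior is standard argparse):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--gpu", action="append", type=int)

print(parser.parse_args([]).gpu)                            # None -> caller substitutes [0]
print(parser.parse_args(["--gpu", "0", "--gpu", "1"]).gpu)  # [0, 1]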

BIN
opencv_transform/__pycache__/__init__.cpython-36.pyc

Binary file not shown.

BIN
opencv_transform/__pycache__/annotation.cpython-36.pyc

Binary file not shown.

BIN
opencv_transform/__pycache__/dress_to_correct.cpython-36.pyc

Binary file not shown.

BIN
opencv_transform/__pycache__/mask_to_maskref.cpython-36.pyc

Binary file not shown.

BIN
opencv_transform/__pycache__/maskdet_to_maskfin.cpython-36.pyc

Binary file not shown.

BIN
opencv_transform/__pycache__/nude_to_watermark.cpython-36.pyc

Binary file not shown.

12
opencv_transform/maskdet_to_maskfin.py

@@ -24,14 +24,14 @@ from opencv_transform.annotation import BodyPart
# create_maskfin ==============================================================================
# return:
# (<Boolean> True/False), depending on the transformation process
def create_maskfin(maskref, maskdet):
def create_maskfin(maskref, maskdet, enable_pubes):
#Create a total green image, in which draw details ellipses
details = np.zeros((512,512,3), np.uint8)
details[:,:,:] = (0,255,0) # (B, G, R)
#Extract body part features:
bodypart_list = extractAnnotations(maskdet);
bodypart_list = extractAnnotations(maskdet, enable_pubes);
#Check if the list is not empty:
if bodypart_list:
@@ -92,7 +92,7 @@ def create_maskfin(maskref, maskdet):
# (<string> maskdet_img): relative path of the single maskdet image (es: testimg1/maskdet/1.png)
# return:
# (<BodyPart []> bodypart_list) - for failure/error, return an empty list []
def extractAnnotations(maskdet):
def extractAnnotations(maskdet, enable_pubes):
#Load the image
#image = cv2.imread(maskdet_img)
@@ -124,7 +124,7 @@ def extractAnnotations(maskdet):
nip_list = inferNip(aur_list)
#Infer the hair:
hair_list = inferHair(vag_list)
hair_list = inferHair(vag_list, enable_pubes)
#Return a combined list:
return tits_list + aur_list + nip_list + vag_list + hair_list + belly_list
@@ -492,11 +492,11 @@ def inferNip(aur_list):
# (<BodyPart[]> vag list)
# return
# (<BodyPart[]> hair list)
def inferHair(vag_list):
def inferHair(vag_list, enable):
hair_list = []
#70% chance to add hair
if random.uniform(0.0, 1.0) > 0.3:
if enable:
for vag in vag_list:

33
opencv_transform/nude_to_watermark.py

@@ -4,23 +4,26 @@ import os
# create_watermark ===============================================================
# return:
# (<Boolean> True/False), depending on the transformation process
# (<Boolean> True/False), depending on the transformation process
def create_watermark(nude):
# Add alpha channel if missing
if nude.shape[2] < 4:
nude = np.dstack([nude, np.ones((512, 512), dtype="uint8") * 255])
# Add alpha channel if missing
if nude.shape[2] < 4:
nude = np.dstack([nude, np.ones((512, 512), dtype="uint8") * 255])
watermark = cv2.imread("fake.png", cv2.IMREAD_UNCHANGED)
f1 = np.asarray([0, 0, 0, 250]) # red color filter
f2 = np.asarray([255, 255, 255, 255])
mask = cv2.bitwise_not(cv2.inRange(watermark, f1, f2))
mask_inv = cv2.bitwise_not(mask)
return nude
res1 = cv2.bitwise_and(nude, nude, mask = mask)
res2 = cv2.bitwise_and(watermark, watermark, mask = mask_inv)
res = cv2.add(res1, res2)
watermark = cv2.imread("fake.png", cv2.IMREAD_UNCHANGED)
f1 = np.asarray([0, 0, 0, 250]) # red color filter
f2 = np.asarray([255, 255, 255, 255])
mask = cv2.bitwise_not(cv2.inRange(watermark, f1, f2))
mask_inv = cv2.bitwise_not(mask)
res1 = cv2.bitwise_and(nude, nude, mask=mask)
res2 = cv2.bitwise_and(watermark, watermark, mask=mask_inv)
res = cv2.add(res1, res2)
alpha = 0.6
return cv2.addWeighted(res, alpha, nude, 1 - alpha, 0)
alpha = 0.6
return cv2.addWeighted(res, alpha, nude, 1 - alpha, 0)
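The closing cv2.addWeighted call above is a per-pixel alpha blend: dst = src1*alpha + src2*beta + gamma, saturated to the array's dtype. (As committed, the early return nude above makes the blend unreachable; the sketch below only illustrates the call's semantics.) A self-contained sketch with synthetic 4-channel arrays (shapes and values illustrative):

import numpy as np
import cv2

a = np.full((2, 2, 4), 200, dtype=np.uint8)  # stand-in for the masked composite
b = np.full((2, 2, 4), 100, dtype=np.uint8)  # stand-in for the base image
out = cv2.addWeighted(a, 0.6, b, 0.4, 0)     # mirrors alpha = 0.6, 1 - alpha = 0.4
print(out[0, 0])  # [160 160 160 160] = 0.6*200 + 0.4*100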

10
requirements-generic.txt

@@ -0,0 +1,10 @@
numpy==1.16.4
pillow==6.1.0
opencv-python==4.1.0.25
rsa==4.0
torchvision==0.2.2.post3
torch==1.1.0
imageio==2.5.0
python-dotenv==0.10.3
rook==0.1.73
sentry-sdk==0.10.2

14
requirements-mac.txt

@@ -0,0 +1,14 @@
# NOTE:
# Torch on Mac does not support CUDA!
# To include GPU processing you will need to compile Torch.
numpy==1.16.4
pillow==6.1.0
opencv-python==4.1.0.25
rsa==4.0
torch==1.1.0
torchvision==0.2.2.post3
imageio==2.5.0
python-dotenv==0.10.3
rook==0.1.73
sentry-sdk==0.10.2

10
requirements-ubuntu.txt

@@ -0,0 +1,10 @@
numpy==1.16.4
pillow==6.1.0
opencv-python==4.1.0.25
rsa==4.0
https://download.pytorch.org/whl/cu100/torch-1.1.0-cp36-cp36m-linux_x86_64.whl
torchvision==0.2.2.post3
imageio==2.5.0
python-dotenv==0.10.3
rook==0.1.73
sentry-sdk==0.10.2

10
requirements-windows.txt

@@ -0,0 +1,10 @@
numpy==1.16.4
pillow==6.1.0
opencv-python==4.1.0.25
rsa==4.0
https://download.pytorch.org/whl/cu100/torch-1.1.0-cp36-cp36m-win_amd64.whl
torchvision==0.2.2.post3
imageio==2.5.0
python-dotenv==0.10.3
rook==0.1.73
sentry-sdk==0.10.2

321
run.py

@@ -1,9 +1,11 @@
import cv2
import os
#Import Neural Network Model
# Import Neural Network Model
from gan import DataLoader, DeepModel, tensor2im
#OpenCv Transform:
# OpenCv Transform:
from multiprocessing.pool import ThreadPool
from opencv_transform.mask_to_maskref import create_maskref
from opencv_transform.maskdet_to_maskfin import create_maskfin
from opencv_transform.dress_to_correct import create_correct
@@ -15,136 +17,199 @@ run.py
This script manages the entire transformation.
Transformation happens in 7 phases:
0: dress -> correct [opencv] dress_to_correct
1: correct -> mask: [GAN] correct_to_mask
2: mask -> maskref [opencv] mask_to_maskref
3: maskref -> maskdet [GAN] maskref_to_maskdet
4: maskdet -> maskfin [opencv] maskdet_to_maskfin
5: maskfin -> nude [GAN] maskfin_to_nude
6: nude -> watermark [opencv] nude_to_watermark
0: dress -> correct [opencv] dress_to_correct
1: correct -> mask: [GAN] correct_to_mask
2: mask -> maskref [opencv] mask_to_maskref
3: maskref -> maskdet [GAN] maskref_to_maskdet
4: maskdet -> maskfin [opencv] maskdet_to_maskfin
5: maskfin -> nude [GAN] maskfin_to_nude
6: nude -> watermark [opencv] nude_to_watermark
"""
phases = ["dress_to_correct", "correct_to_mask", "mask_to_maskref", "maskref_to_maskdet", "maskdet_to_maskfin", "maskfin_to_nude", "nude_to_watermark"]
class Options():
#Init options with default values
def __init__(self):
# experiment specifics
self.norm = 'batch' #instance normalization or batch normalization
self.use_dropout = False #use dropout for the generator
self.data_type = 32 #Supported data type i.e. 8, 16, 32 bit
# input/output sizes
self.batchSize = 1 #input batch size
self.input_nc = 3 # of input image channels
self.output_nc = 3 # of output image channels
# for setting inputs
self.serial_batches = True #if true, takes images in order to make batches, otherwise takes them randomly
self.nThreads = 1 ## threads for loading data (???)
self.max_dataset_size = 1 #Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.
# for generator
self.netG = 'global' #selects model to use for netG
self.ngf = 64 ## of gen filters in first conv layer
self.n_downsample_global = 4 #number of downsampling layers in netG
self.n_blocks_global = 9 #number of residual blocks in the global generator network
self.n_blocks_local = 0 #number of residual blocks in the local enhancer network
self.n_local_enhancers = 0 #number of local enhancers to use
self.niter_fix_global = 0 #number of epochs that we only train the outmost local enhancer
#Phase specific options
self.checkpoints_dir = ""
self.dataroot = ""
#Changes options according to the actual phase
def updateOptions(self, phase):
if phase == "correct_to_mask":
self.checkpoints_dir = "checkpoints/cm.lib"
elif phase == "maskref_to_maskdet":
self.checkpoints_dir = "checkpoints/mm.lib"
elif phase == "maskfin_to_nude":
self.checkpoints_dir = "checkpoints/mn.lib"
phases = [
"dress_to_correct",
"correct_to_mask",
"mask_to_maskref",
"maskref_to_maskdet",
"maskdet_to_maskfin",
"maskfin_to_nude",
"nude_to_watermark",
]
class Options:
# Init options with default values
def __init__(self):
# experiment specifics
self.norm = "batch" # instance normalization or batch normalization
self.use_dropout = False # use dropout for the generator
self.data_type = 32 # Supported data type i.e. 8, 16, 32 bit
# input/output sizes
self.batchSize = 1 # input batch size
self.input_nc = 3 # of input image channels
self.output_nc = 3 # of output image channels
# for setting inputs
# if true, takes images in order to make batches, otherwise takes them randomly
self.serial_batches = True
self.nThreads = (
0
) # threads for loading data. Keep this value at 0! see: https://github.com/pytorch/pytorch/issues/12831
# Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.
self.max_dataset_size = 1
# for generator
self.netG = "global" # selects model to use for netG
self.ngf = 64 # of gen filters in first conv layer
self.n_downsample_global = 4 # number of downsampling layers in netG
self.n_blocks_global = (
9
) # number of residual blocks in the global generator network
self.n_blocks_local = (
0
) # number of residual blocks in the local enhancer network
self.n_local_enhancers = 0 # number of local enhancers to use
# number of epochs that we only train the outmost local enhancer
self.niter_fix_global = 0
# Phase specific options
self.checkpoints_dir = ""
self.dataroot = ""
# Changes options according to the actual phase
def updateOptions(self, phase):
directory = os.path.dirname(os.path.realpath(__file__))
if phase == "correct_to_mask":
self.checkpoints_dir = os.path.join(directory, "checkpoints", "cm.lib")
elif phase == "maskref_to_maskdet":
self.checkpoints_dir = os.path.join(directory, "checkpoints", "mm.lib")
elif phase == "maskfin_to_nude":
self.checkpoints_dir = os.path.join(directory, "checkpoints", "mn.lib")
# process(cv_img, mode)
# return:
# watermark image
def process(cv_img):
#InMemory cv2 images:
dress = cv_img
correct = None
mask = None
maskref = None
maskfin = None
maskdet = None
nude = None
watermark = None
for index, phase in enumerate(phases):
print("Executing phase: " + phase)
#GAN phases:
if (phase == "correct_to_mask") or (phase == "maskref_to_maskdet") or (phase == "maskfin_to_nude"):
#Load global option
opt = Options()
#Load custom phase options:
opt.updateOptions(phase)
#Load Data
if (phase == "correct_to_mask"):
data_loader = DataLoader(opt, correct)
elif (phase == "maskref_to_maskdet"):
data_loader = DataLoader(opt, maskref)
elif (phase == "maskfin_to_nude"):
data_loader = DataLoader(opt, maskfin)
dataset = data_loader.load_data()
#Create Model
model = DeepModel()
model.initialize(opt)
#Run for every image:
for i, data in enumerate(dataset):
generated = model.inference(data['label'], data['inst'])
im = tensor2im(generated.data[0])
#Save Data
if (phase == "correct_to_mask"):
mask = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
elif (phase == "maskref_to_maskdet"):
maskdet = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
elif (phase == "maskfin_to_nude"):
nude = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
#Correcting:
elif (phase == 'dress_to_correct'):
correct = create_correct(dress)
#mask_ref phase (opencv)
elif (phase == "mask_to_maskref"):
maskref = create_maskref(mask, correct)
#mask_fin phase (opencv)
elif (phase == "maskdet_to_maskfin"):
maskfin = create_maskfin(maskref, maskdet)
#nude_to_watermark phase (opencv)
elif (phase == "nude_to_watermark"):
watermark = create_watermark(nude)
return watermark
import sys
def process(cv_img, gpu_ids, enable_pubes):
# InMemory cv2 images:
dress = cv_img
correct = None
mask = None
maskref = None
maskfin = None
maskdet = None
nude = None
watermark = None
print("GPU IDs: " + str(gpu_ids), flush=True)
for index, phase in enumerate(phases):
print("Executing phase: " + phase, flush=True)
# GAN phases:
if (
(phase == "correct_to_mask")
or (phase == "maskref_to_maskdet")
or (phase == "maskfin_to_nude")
):
# Load global option
opt = Options()
# Load custom phase options:
opt.updateOptions(phase)
# Load Data
if phase == "correct_to_mask":
data_loader = DataLoader(opt, correct)
elif phase == "maskref_to_maskdet":
data_loader = DataLoader(opt, maskref)
elif phase == "maskfin_to_nude":
data_loader = DataLoader(opt, maskfin)
dataset = data_loader.load_data()
# Create Model
model = DeepModel()
model.initialize(opt, gpu_ids)
# Run for every image:
for i, data in enumerate(dataset):
generated = model.inference(data["label"], data["inst"])
im = tensor2im(generated.data[0])
# Save Data
if phase == "correct_to_mask":
mask = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
cv2.imwrite("mask.png", mask)
elif phase == "maskref_to_maskdet":
maskdet = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
cv2.imwrite("maskdet.png", maskdet)
elif phase == "maskfin_to_nude":
nude = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
cv2.imwrite("nude.png", nude)
# Correcting:
elif phase == "dress_to_correct":
correct = create_correct(dress)
cv2.imwrite("correct.png", correct)
# mask_ref phase (opencv)
elif phase == "mask_to_maskref":
maskref = create_maskref(mask, correct)
cv2.imwrite("maskref.png", maskref)
# mask_fin phase (opencv)
elif phase == "maskdet_to_maskfin":
maskfin = create_maskfin(maskref, maskdet, enable_pubes)
cv2.imwrite("maskfin.png", maskfin)
# nude_to_watermark phase (opencv)
elif phase == "nude_to_watermark":
watermark = create_watermark(nude)
return watermark
# process_gif(gif_imgs, gpu_ids, enable_pubes, tmp_dir)
# return:
# gif
def process_gif(gif_imgs, gpu_ids, enable_pubes, tmp_dir):
def process_one_image(a):
print("Processing image : {}/{}".format(a[1] + 1, len(gif_imgs)))
img = cv2.resize(a[0], (512, 512))
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
cv2.imwrite(
os.path.join(tmp_dir, "output_{}.jpg".format(a[1])),
process(img, gpu_ids, enable_pubes),
)
print(gpu_ids)
if (
gpu_ids is None
): # Only multithreading with CPU because threads cause crashes with GPU
pool = ThreadPool(4)
pool.map(process_one_image, zip(gif_imgs, range(len(gif_imgs))))
pool.close()
pool.join()
else:
for x in zip(gif_imgs, range(len(gif_imgs))):
process_one_image(x)
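The CPU-only ThreadPool fan-out above maps process_one_image over (frame, index) pairs; on GPU it degrades to a sequential loop because, as the inline comment notes, threads crash the GPU path. A minimal sketch of the same pattern with a stand-in work function:

from multiprocessing.pool import ThreadPool

def work(pair):
    frame, i = pair
    return (i, len(frame))  # placeholder for per-frame processing

frames = ["abc", "de", "fghi"]
pool = ThreadPool(4)  # same worker count as the diff
results = pool.map(work, zip(frames, range(len(frames))))
pool.close()
pool.join()
print(results)  # [(0, 3), (1, 2), (2, 4)]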
