Browse Source

Ultimate Refactoring

* codeclimate.com rated A :D
master
PommeDroid 3 years ago
parent
commit
be3c79d45b
  1. 3
      argv/__init__.py
  2. 49
      argv/run/__init__.py
  3. 119
      argv/run/argument.py
  4. 65
      argv/run/config.py
  5. 100
      processing/__init__.py
  6. 10
      processing/folder.py
  7. 9
      processing/gif.py
  8. 7
      processing/image.py
  9. 4
      processing/multiple.py
  10. 93
      processing/utils.py
  11. 66
      transform/__init__.py
  12. 56
      transform/gan/generator.py
  13. 42
      transform/gan/mask.py
  14. 94
      transform/opencv/bodypart/__init__.py
  15. 159
      transform/opencv/bodypart/extract.py
  16. 22
      transform/opencv/bodypart/inferrer.py
  17. 94
      transform/opencv/bodypart/resolver.py
  18. 104
      transform/opencv/mask.py
  19. 41
      transform/opencv/resize.py
  20. 30
      utils.py

3
argv/__init__.py

@ -8,7 +8,8 @@ from argv.checkpoints import init_checkpoints_sub_parser, check_args_checkpoints @@ -8,7 +8,8 @@ from argv.checkpoints import init_checkpoints_sub_parser, check_args_checkpoints
from argv.common import arg_help, arg_debug, arg_version
from argv.daemon import init_daemon_sub_parser
from argv.gpu_info import init_gpu_info_sub_parser
from argv.run import init_run_parser, check_args_run_parser, set_args_run_parser
from argv.run import init_run_parser
from argv.run.config import set_args_run_parser, check_args_run_parser
from utils import setup_log, json_to_argv

49
argv/run/__init__.py

@ -0,0 +1,49 @@ @@ -0,0 +1,49 @@
import main
from argv.checkpoints import arg_checkpoints
from argv.common import arg_debug, arg_help, arg_version
from argv.run.argument import arg_altered, arg_auto_rescale, arg_auto_resize, arg_auto_resize_crop, arg_color_transfer, \
arg_cpu, arg_gpu, arg_ignore_size, arg_input, arg_json_args, arg_json_folder_name, arg_n_core, arg_n_run, \
arg_output, arg_overlay, arg_preferences, arg_step
def init_run_parser(subparsers):
    """Register the 'run' sub-command and all of its arguments.

    :param subparsers: <argparse._SubParsersAction> sub-parser collection to extend
    :return: None
    """
    run_parser = subparsers.add_parser(
        'run',
        description="Process image(s) with dreampower.",
        help="Process image(s) with dreampower.",
        add_help=False
    )
    run_parser.set_defaults(func=main.main)
    # conflicts handler: options inside each group are mutually exclusive
    processing_mod = run_parser.add_mutually_exclusive_group()
    scale_mod = run_parser.add_mutually_exclusive_group()
    # add run arguments
    arg_input(run_parser)
    arg_output(run_parser)
    arg_auto_rescale(scale_mod)
    arg_auto_resize(scale_mod)
    arg_auto_resize_crop(scale_mod)
    arg_overlay(scale_mod)
    arg_ignore_size(scale_mod)
    arg_color_transfer(run_parser)
    arg_preferences(run_parser)
    arg_n_run(run_parser)
    arg_step(run_parser)
    arg_altered(run_parser)
    arg_cpu(processing_mod)
    arg_gpu(processing_mod)
    arg_checkpoints(run_parser)
    arg_n_core(run_parser)
    arg_json_args(run_parser)
    arg_json_folder_name(run_parser)
    arg_help(run_parser)
    arg_debug(run_parser)
    arg_version(run_parser)

119
argv/run.py → argv/run/argument.py

@ -1,77 +1,7 @@ @@ -1,77 +1,7 @@
import json
import os
import re
from json import JSONDecodeError
import gpu_info
import main
from argv.checkpoints import check_arg_checkpoints, set_arg_checkpoints, arg_checkpoints
from argv.common import arg_debug, arg_help, arg_version
from utils import check_image_file_validity, cv2_supported_extension
def init_run_parser(subparsers):
run_parser = subparsers.add_parser(
'run',
description="Process image(s) with dreampower.",
help="Process image(s) with dreampower.",
add_help=False
)
run_parser.set_defaults(func=main.main)
# conflicts handler
processing_mod = run_parser.add_mutually_exclusive_group()
scale_mod = run_parser.add_mutually_exclusive_group()
# add run arguments
arg_input(run_parser)
arg_output(run_parser)
arg_auto_rescale(scale_mod)
arg_auto_resize(scale_mod)
arg_auto_resize_crop(scale_mod)
arg_overlay(scale_mod)
arg_ignore_size(scale_mod)
arg_color_transfer(run_parser)
arg_preferences(run_parser)
arg_n_run(run_parser)
arg_step(run_parser)
arg_altered(run_parser)
arg_cpu(processing_mod)
arg_gpu(processing_mod)
arg_checkpoints(run_parser)
arg_n_core(run_parser)
arg_json_args(run_parser)
arg_json_folder_name(run_parser)
arg_help(run_parser)
arg_debug(run_parser)
arg_version(run_parser)
def set_args_run_parser(args):
set_arg_checkpoints(args)
set_arg_preference(args)
set_gpu_ids(args)
def check_args_run_parser(parser, args):
check_arg_input(parser, args)
check_arg_output(parser, args)
check_args_altered(parser, args)
check_arg_checkpoints(parser, args)
def check_args_altered(parser, args):
if args.steps and not args.altered:
parser.error("--steps requires --altered.")
elif args.steps and args.altered:
if not os.path.isdir(args.altered):
parser.error("{} directory doesn't exist.".format(args.altered))
from utils import load_json
def arg_altered(parser):
@ -114,15 +44,6 @@ def arg_color_transfer(parser): @@ -114,15 +44,6 @@ def arg_color_transfer(parser):
)
def set_gpu_ids(args):
if args.cpu:
args.gpu_ids = None
elif args.gpu:
args.gpu_ids = args.gpu
else:
args.gpu_ids = None if not gpu_info.get_info()['has_cuda'] else [0]
def arg_cpu(parser):
parser.add_argument(
"--cpu",
@ -158,32 +79,15 @@ def arg_input(parser): @@ -158,32 +79,15 @@ def arg_input(parser):
)
def check_arg_input(parser, args):
if not args.input:
parser.error("-i, --input INPUT is required.")
if not os.path.isdir(args.input) and not os.path.isfile(args.input):
parser.ArgumentTypeError("Input {} file or directory doesn't exist.".format(args.input))
elif os.path.isfile(args.input) and os.path.splitext(args.input)[1] not in cv2_supported_extension() + [".gif"]:
parser.ArgumentTypeError("Input {} file not supported format.".format(args.input))
if os.path.isfile(args.input):
check_image_file_validity(args.input)
return args.input
def arg_json_args(parser):
def check_json_args_file():
def type_func(a):
try:
if os.path.isfile(a):
with open(a, 'r') as f:
j = json.load(f)
else:
j = json.loads(str(a))
j = load_json(a)
except JSONDecodeError:
raise parser.error(
"Arguments json {} is not in valid JSON format.".format(a))
return j
return type_func
parser.add_argument(
@ -231,15 +135,6 @@ def arg_output(parser): @@ -231,15 +135,6 @@ def arg_output(parser):
)
def check_arg_output(parser, args):
if os.path.isfile(args.input) and not args.output:
_, extension = os.path.splitext(args.input)
args.output = "output{}".format(extension)
elif args.output and os.path.isfile(args.input) and os.path.splitext(args.output)[1] \
not in cv2_supported_extension() + [".gif"]:
parser.error("Output {} file not a supported format.".format(args.output))
def arg_overlay(parser):
def check_crops_coord():
def type_func(a):
@ -259,16 +154,6 @@ def arg_overlay(parser): @@ -259,16 +154,6 @@ def arg_overlay(parser):
)
def set_arg_preference(args):
args.prefs = {
"titsize": args.bsize,
"aursize": args.asize,
"nipsize": args.nsize,
"vagsize": args.vsize,
"hairsize": args.hsize
}
def arg_preferences(parser):
parser.add_argument(
"--bsize",

65
argv/run/config.py

@ -0,0 +1,65 @@ @@ -0,0 +1,65 @@
import os
import gpu_info
from argv.checkpoints import set_arg_checkpoints, check_arg_checkpoints
from utils import check_image_file_validity, is_a_supported_image_file_extension
def set_args_run_parser(args):
    """Derive runtime settings from the parsed 'run' arguments.

    Fills in checkpoint paths, body-part size preferences and the GPU id
    list on *args* (mutated in place).

    :param args: <Namespace> parsed command-line arguments
    :return: None
    """
    set_arg_checkpoints(args)
    set_arg_preference(args)
    set_gpu_ids(args)
def check_args_run_parser(parser, args):
    """Validate the parsed 'run' arguments, aborting via parser.error on failure.

    :param parser: <ArgumentParser> parser used to report argument errors
    :param args: <Namespace> parsed command-line arguments
    :return: None
    """
    check_arg_input(parser, args)
    check_arg_output(parser, args)
    check_args_altered(parser, args)
    check_arg_checkpoints(parser, args)
def check_args_altered(parser, args):
    """Validate the --steps / --altered combination.

    --steps is only meaningful together with --altered, and the altered
    path must be an existing directory.

    :param parser: <ArgumentParser> parser used to report argument errors
    :param args: <Namespace> parsed command-line arguments
    :return: None
    """
    if not args.steps:
        return
    if not args.altered:
        parser.error("--steps requires --altered.")
    elif not os.path.isdir(args.altered):
        parser.error("{} directory doesn't exist.".format(args.altered))
def set_gpu_ids(args):
    """Resolve which GPU ids the run should use and store them on *args*.

    Priority: explicit --cpu disables GPUs, an explicit --gpu list wins,
    otherwise GPU 0 is used when CUDA is available.

    :param args: <Namespace> parsed command-line arguments (mutated in place)
    :return: None
    """
    if args.cpu:
        gpu_ids = None
    elif args.gpu:
        gpu_ids = args.gpu
    elif gpu_info.get_info()['has_cuda']:
        gpu_ids = [0]
    else:
        gpu_ids = None
    args.gpu_ids = gpu_ids
def check_arg_input(parser, args):
    """Validate the --input argument.

    The input must exist, and when it is a single file it must be a
    supported image format that opens correctly.

    :param parser: <ArgumentParser> parser used to report argument errors
    :param args: <Namespace> parsed command-line arguments
    :return: <string> the validated input path
    """
    path = args.input
    if not path:
        parser.error("-i, --input INPUT is required.")
    input_is_file = os.path.isfile(path)
    if not input_is_file and not os.path.isdir(path):
        parser.error("Input {} file or directory doesn't exist.".format(path))
    elif input_is_file and not is_a_supported_image_file_extension(path):
        parser.error("Input {} file not supported format.".format(path))
    if input_is_file:
        check_image_file_validity(path)
    return path
def check_arg_output(parser, args):
    """Validate (and default) the --output argument.

    When the input is a single file and no output was given, the output
    defaults to "output" with the input's extension; an explicit output
    for a file input must use a supported image format.

    :param parser: <ArgumentParser> parser used to report argument errors
    :param args: <Namespace> parsed command-line arguments (output may be set in place)
    :return: None
    """
    input_is_file = os.path.isfile(args.input)
    if input_is_file and not args.output:
        extension = os.path.splitext(args.input)[1]
        args.output = "output{}".format(extension)
    elif input_is_file and args.output and not is_a_supported_image_file_extension(args.output):
        parser.error("Output {} file not a supported format.".format(args.output))
def set_arg_preference(args):
    """Collect the body-part size options into the args.prefs dict.

    :param args: <Namespace> parsed command-line arguments (mutated in place)
    :return: None
    """
    args.prefs = dict(
        titsize=args.bsize,
        aursize=args.asize,
        nipsize=args.nsize,
        vagsize=args.vsize,
        hairsize=args.hsize,
    )

100
processing/__init__.py

@ -3,40 +3,38 @@ import os @@ -3,40 +3,38 @@ import os
import time
from config import Config as Conf
from transform.gan.mask import CorrectToMask, MaskrefToMaskdet, MaskfinToNude
from transform.opencv.correct import DressToCorrect, ColorTransfer
from transform.opencv.mask import MaskToMaskref, MaskdetToMaskfin
from transform.opencv.resize import ImageToResized, ImageToCrop, ImageToOverlay, ImageToResizedCrop, ImageToRescale
from utils import camel_case_to_str, cv2_supported_extension, check_shape
from utils import camel_case_to_str, cv2_supported_extension
class Processing:
"""Abstract Process Class."""
def __init__(self, args=None):
"""
Process Constructor.
Image Transformation Class Constructor.
:param input_index: <tuple> index where to take the inputs (default is (-1) for previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
self.__start = time.time()
self._args = Conf.args.copy() if args is None else args.copy()
def run(self):
def run(self, *args):
"""
Run the process.
Run the Image Transform.
:return: None
:param args: <dict> settings for the transformation
:return: <RGB> image
"""
self.__start = time.time()
self._info_start_run()
self._setup()
self._execute()
self._clean()
self._setup(*args)
r = self._execute(*args)
self._clean(*args)
self._info_end_run()
return r
def _info_start_run(self):
"""
Log info when the process run begin.
Log info at the start of the run.
:return: None
"""
@ -45,36 +43,39 @@ class Processing: @@ -45,36 +43,39 @@ class Processing:
def _info_end_run(self):
"""
Log info when the process run end.
Log info at the end of the run.
:return: None
"""
Conf.log.info("{} Finish".format(camel_case_to_str(self.__class__.__name__)))
Conf.log.debug("{} Done in {} seconds".format(
camel_case_to_str(self.__class__.__name__), round(time.time() - self.__start, 2)))
def _setup(self):
def _setup(self, *args):
"""
Configure the process to be ready to execute.
Configure the transformation.
:param args: <dict> settings for the transformation
:return: None
"""
pass
def _execute(self):
def _execute(self, *args):
"""
Execute the process.
Execute the transformation.
:param args: <dict> settings for the transformation
:return: None
"""
pass
def _clean(self):
def _clean(self, *args):
"""
Cleanup a process execution.
Clean the transformation.
:param args: <dict> settings for the transformation
:return: None
"""
pass
class SimpleProcessing(Processing):
@ -105,56 +106,3 @@ class SimpleProcessing(Processing): @@ -105,56 +106,3 @@ class SimpleProcessing(Processing):
return ImageProcessing(args=args)
else:
return None
def select_phases(args):
"""
Select the transformation phases to use following args parameters.
:return: <ImageTransform[]> list of image transformation
"""
def shift_step(shift_starting=0, shift_ending=0):
if not args['steps']:
args['steps'] = (0, 5)
args['steps'] = (
args['steps'][0] + shift_starting,
args['steps'][1] + shift_ending
)
def add_tail(phases, phase):
phases = [phase] + phases
if args['steps'] and args['steps'][0] != 0:
shift_step(shift_starting=1)
if args['steps'] and args['steps'][1] == len(phases) - 1:
shift_step(shift_ending=1)
return phases
def add_head(phases, phase):
phases = phases + [phase]
if args['steps'] and args['steps'][1] == len(phases) - 1:
shift_step(shift_ending=1)
return phases
phases = [DressToCorrect, CorrectToMask, MaskToMaskref,
MaskrefToMaskdet, MaskdetToMaskfin, MaskfinToNude]
Conf.log.debug(args)
if args['overlay']:
phases = add_tail(phases, ImageToResized)
phases = add_tail(phases, ImageToCrop)
phases = add_head(phases, ImageToOverlay)
elif args['auto_resize']:
phases = add_tail(phases, ImageToResized)
elif args['auto_resize_crop']:
phases = add_tail(phases, ImageToResizedCrop)
elif args['auto_rescale']:
phases = add_tail(phases, ImageToRescale)
elif os.path.isfile(args['input']):
if not args['ignore_size']:
check_shape(args['input'])
else:
Conf.log.warn('Image Size Requirements Unchecked.')
if args['color_transfer']:
phases = add_head(phases, ColorTransfer)
return phases

10
processing/folder.py

@ -7,9 +7,9 @@ import sys @@ -7,9 +7,9 @@ import sys
from json import JSONDecodeError
from config import Config as Conf
from processing import select_phases
from processing.multiple import MultipleImageProcessing
from utils import cv2_supported_extension
from processing.utils import select_phases
from utils import is_a_supported_image_file_extension
class FolderImageProcessing(MultipleImageProcessing):
@ -32,8 +32,7 @@ class FolderImageProcessing(MultipleImageProcessing): @@ -32,8 +32,7 @@ class FolderImageProcessing(MultipleImageProcessing):
for r, _, _ in os.walk(self.__input_folder_path):
args = copy.deepcopy(self._args)
args['input'] = [
x.path for x in os.scandir(r)
if x.is_file() and os.path.splitext(x.path)[1] in cv2_supported_extension() + [".gif"]
x.path for x in os.scandir(r) if x.is_file() and is_a_supported_image_file_extension(x.path)
]
args['phases'] = select_phases(self._args)
args['output'] = [
@ -48,8 +47,7 @@ class FolderImageProcessing(MultipleImageProcessing): @@ -48,8 +47,7 @@ class FolderImageProcessing(MultipleImageProcessing):
pathlib.Path(*pathlib.Path(r).parts[1:]),
os.path.basename(x.path)
)
for x in os.scandir(r)
if x.is_file() and os.path.splitext(x.path)[1] in cv2_supported_extension() + [".gif"]
for x in args['input']
]
self._process_list.append(

9
processing/gif.py

@ -7,7 +7,8 @@ import cv2 @@ -7,7 +7,8 @@ import cv2
import imageio
from config import Config as Conf
from processing import Processing, select_phases
from processing import Processing
from processing.utils import select_phases
from processing.multiple_image import MultipleImageProcessing
from utils import write_image
@ -29,7 +30,7 @@ class GifProcessing(Processing): @@ -29,7 +30,7 @@ class GifProcessing(Processing):
self.__temp_input_paths = []
self.__temp_output_paths = []
def _setup(self):
def _setup(self, *args):
self.__tmp_dir = tempfile.mkdtemp()
Conf.log.debug("Temporay dir is {}".format(self.__tmp_dir))
imgs = imageio.mimread(self.__input_path)
@ -43,7 +44,7 @@ class GifProcessing(Processing): @@ -43,7 +44,7 @@ class GifProcessing(Processing):
for i in zip(imgs, self.__temp_input_paths):
write_image(cv2.cvtColor(i[0], cv2.COLOR_RGB2BGR), i[1])
def _execute(self):
def _execute(self, *args):
"""
Execute all phases on each frames of the gif and recreate the gif.
@ -58,5 +59,5 @@ class GifProcessing(Processing): @@ -58,5 +59,5 @@ class GifProcessing(Processing):
Conf.log.info("{} Gif Created ".format(self.__output_path))
def _clean(self):
def _clean(self, *args):
shutil.rmtree(self.__tmp_dir)

7
processing/image.py

@ -3,7 +3,8 @@ import os @@ -3,7 +3,8 @@ import os
import sys
from config import Config as Conf
from processing import Processing, select_phases
from processing import Processing
from processing.utils import select_phases
from utils import read_image, camel_case_to_str, write_image
@ -40,7 +41,7 @@ class ImageProcessing(Processing): @@ -40,7 +41,7 @@ class ImageProcessing(Processing):
super()._info_start_run()
Conf.log.info("Processing on {}".format(str(self.__image_steps)[2:-2]))
def _setup(self):
def _setup(self, *args):
try:
self.__image_steps = [read_image(x) if isinstance(x, str) else x for x in self.__image_steps]
except FileNotFoundError as e:
@ -51,7 +52,7 @@ class ImageProcessing(Processing): @@ -51,7 +52,7 @@ class ImageProcessing(Processing):
"directory path that contains valid images.")
sys.exit(1)
def _execute(self):
def _execute(self, *args):
"""
Execute all phases on the image.

4
processing/multiple.py

@ -24,7 +24,7 @@ class MultipleImageProcessing(Processing): @@ -24,7 +24,7 @@ class MultipleImageProcessing(Processing):
self.__multiprocessing = Conf.multiprocessing()
self.__children_process = children_process
def _setup(self):
def _setup(self, *args):
self._process_list = []
for input_path, output_path in zip(self._input_paths, self._output_paths):
@ -33,7 +33,7 @@ class MultipleImageProcessing(Processing): @@ -33,7 +33,7 @@ class MultipleImageProcessing(Processing):
args['output'] = output_path
self._process_list.append(self.__children_process(args=args))
def _execute(self):
def _execute(self, *args):
"""
Execute all phases on the list of images.

93
processing/utils.py

@ -0,0 +1,93 @@ @@ -0,0 +1,93 @@
import os
from config import Config as Conf
from transform.gan.mask import CorrectToMask, MaskrefToMaskdet, MaskfinToNude
from transform.opencv.correct import DressToCorrect, ColorTransfer
from transform.opencv.mask import MaskToMaskref, MaskdetToMaskfin
from transform.opencv.resize import ImageToResized, ImageToCrop, ImageToOverlay, ImageToResizedCrop, ImageToRescale
from utils import check_shape
def shift_step(args, shift_start_add=0, shift_end_add=0):
    """Shift the (start, end) step window stored in args['steps'].

    A missing/empty window defaults to the full pipeline (0, 5) before
    the shift is applied.

    :param args: <dict> run settings (mutated in place)
    :param shift_start_add: <int> amount added to the window start
    :param shift_end_add: <int> amount added to the window end
    :return: None
    """
    start, end = args['steps'] if args['steps'] else (0, 5)
    args['steps'] = (start + shift_start_add, end + shift_end_add)
def shift_starting(args):
    """Shift the step window start by one when it does not begin at phase 0.

    :param args: <dict> run settings (mutated in place)
    :return: None
    """
    if args['steps'] and args['steps'][0] != 0:
        shift_step(args, shift_start_add=1)
def shift_ending(args, p):
    """Shift the step window end by one when it points at the last phase of *p*.

    :param args: <dict> run settings (mutated in place)
    :param p: <ImageTransform[]> current phase list
    :return: None
    """
    if args['steps'] and args['steps'][1] == len(p) - 1:
        shift_step(args, shift_end_add=1)
def add_tail(args, p, add):
    """Prepend phase *add* to the phase list, keeping the step window aligned.

    :param args: <dict> run settings (steps may be shifted in place)
    :param p: <ImageTransform[]> current phase list
    :param add: <ImageTransform> phase class to prepend
    :return: <ImageTransform[]> new phase list
    """
    p = [add] + p
    shift_starting(args)
    shift_ending(args, p)
    return p
def add_head(args, p, add):
    """Append phase *add* to the phase list, keeping the step window aligned.

    :param args: <dict> run settings (steps may be shifted in place)
    :param p: <ImageTransform[]> current phase list
    :param add: <ImageTransform> phase class to append
    :return: <ImageTransform[]> new phase list
    """
    p = p + [add]
    shift_ending(args, p)
    return p
def overlay(args, p):
    """Wrap the phase list for overlay mode.

    Resize and crop run before the main pipeline; the overlay phase runs
    after it.

    :param args: <dict> run settings
    :param p: <ImageTransform[]> current phase list
    :return: <ImageTransform[]> updated phase list
    """
    p = add_tail(args, p, ImageToResized)
    p = add_tail(args, p, ImageToCrop)
    p = add_head(args, p, ImageToOverlay)
    return p
def auto_resize(args, p):
    """Prepend the automatic resize phase to the pipeline."""
    return add_tail(args, p, ImageToResized)


def auto_resize_crop(args, p):
    """Prepend the automatic resize-and-crop phase to the pipeline."""
    return add_tail(args, p, ImageToResizedCrop)


def auto_rescale(args, p):
    """Prepend the automatic rescale phase to the pipeline."""
    return add_tail(args, p, ImageToRescale)
def is_file(args):
    """Run the size check on the input image unless --ignore-size was given.

    NOTE(review): despite its name this function validates and returns
    nothing; it does not test whether the input is a file.

    :param args: <dict> run settings
    :return: None
    """
    if not args['ignore_size']:
        check_shape(args['input'])
    else:
        Conf.log.warn('Image Size Requirements Unchecked.')
def scale_mod(args, p):
    """Apply the selected scaling mode to the phase list.

    The handler is selected by matching each handler function's __name__
    against the corresponding boolean flag in *args*, so the handler names
    (overlay, auto_resize, auto_resize_crop, auto_rescale) must match the
    argparse destinations exactly.

    :param args: <dict> run settings
    :param p: <ImageTransform[]> current phase list
    :return: <ImageTransform[]> updated phase list
    """
    for mod in (overlay, auto_resize, auto_resize_crop, auto_rescale):
        if args[mod.__name__]:
            return mod(args, p)
    # NOTE(review): this reads Conf.args["input"] while every other check in
    # this module uses the local ``args`` dict -- confirm this is intentional.
    if os.path.isfile(Conf.args["input"]):
        is_file(args)
    return p
def select_phases(args):
    """
    Select the transformation phases to use following args parameters.

    :param args: <dict> run settings (steps, scaling flags, color_transfer)
    :return: <ImageTransform[]> list of image transformation
    """
    # Base pipeline: dress -> correct -> mask -> maskref -> maskdet
    # -> maskfin -> nude.
    phases = [DressToCorrect, CorrectToMask, MaskToMaskref,
              MaskrefToMaskdet, MaskdetToMaskfin, MaskfinToNude]
    phases = scale_mod(args, phases)
    if args['color_transfer']:
        phases = add_head(args, phases, ColorTransfer)
    return phases

66
transform/__init__.py

@ -2,10 +2,10 @@ @@ -2,10 +2,10 @@
import time
from config import Config as Conf
from utils import camel_case_to_str
from processing import Processing
class ImageTransform:
class ImageTransform(Processing):
"""Abstract Image Transformation Class."""
def __init__(self, input_index=(-1,), args=None):
@ -15,66 +15,8 @@ class ImageTransform: @@ -15,66 +15,8 @@ class ImageTransform:
:param input_index: <tuple> index where to take the inputs (default is (-1) for previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__(args)
self.__start = time.time()
self.input_index = input_index
self._args = Conf.args.copy() if args is None else args.copy()
def run(self, *args):
"""
Run the Image Transform.
:param args: <dict> settings for the transformation
:return: <RGB> image
"""
self.__start = time.time()
self.info_start_run()
self._setup(*args)
r = self._execute(*args)
self._clean(*args)
self.info_end_run()
return r
def info_start_run(self):
"""
Log info at the start of the run.
:return: None
"""
self.__start = time.time()
Conf.log.info("Executing {}".format(camel_case_to_str(self.__class__.__name__)))
def info_end_run(self):
"""
Log info at the end of the run.
:return: None
"""
Conf.log.debug("{} Done in {} seconds".format(
camel_case_to_str(self.__class__.__name__), round(time.time() - self.__start, 2)))
def _setup(self, *args):
"""
Configure the transformation.
:param args: <dict> settings for the transformation
:return: None
"""
pass
def _execute(self, *args):
"""
Execute the transformation.
:param args: <dict> settings for the transformation
:return: None
"""
pass
def _clean(self, *args):
"""
Clean the transformation.
:param args: <dict> settings for the transformation
:return: None
"""
pass

56
transform/gan/generator.py

@ -134,14 +134,7 @@ class ResnetBlock(torch.nn.Module): @@ -134,14 +134,7 @@ class ResnetBlock(torch.nn.Module):
):
conv_block = []
p = 0
if padding_type == "reflect":
conv_block += [torch.nn.ReflectionPad2d(1)]
elif padding_type == "replicate":
conv_block += [torch.nn.ReplicationPad2d(1)]
elif padding_type == "zero":
p = 1
else:
raise NotImplementedError("padding [%s] is not implemented" % padding_type)
conv_block, p = ResnetBlock.__increment_padding_conv_block(conv_block, p, padding_type)
conv_block += [
torch.nn.Conv2d(dim, dim, kernel_size=3, padding=p),
@ -152,6 +145,16 @@ class ResnetBlock(torch.nn.Module): @@ -152,6 +145,16 @@ class ResnetBlock(torch.nn.Module):
conv_block += [torch.nn.Dropout(0.5)]
p = 0
conv_block, p = ResnetBlock.__increment_padding_conv_block(conv_block, p, padding_type)
conv_block += [
torch.nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim),
]
return torch.nn.Sequential(*conv_block)
@staticmethod
def __increment_padding_conv_block(conv_block, p, padding_type):
if padding_type == "reflect":
conv_block += [torch.nn.ReflectionPad2d(1)]
elif padding_type == "replicate":
@ -160,12 +163,7 @@ class ResnetBlock(torch.nn.Module): @@ -160,12 +163,7 @@ class ResnetBlock(torch.nn.Module):
p = 1
else:
raise NotImplementedError("padding [%s] is not implemented" % padding_type)
conv_block += [
torch.nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim),
]
return torch.nn.Sequential(*conv_block)
return conv_block, p
def forward(self, x):
"""
@ -230,16 +228,30 @@ def tensor2im(image_tensor, imtype=np.uint8, normalize=True): @@ -230,16 +228,30 @@ def tensor2im(image_tensor, imtype=np.uint8, normalize=True):
:return:
"""
if isinstance(image_tensor, list):
image_numpy = []
for i in image_tensor:
image_numpy.append(tensor2im(i, imtype, normalize))
return image_numpy
image_numpy = image_tensor.cpu().float().numpy()
if normalize:
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
return tesor2im_list(image_tensor, imtype, normalize)
else:
image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
return tensor2im_not_list(image_tensor, imtype, normalize)
def tensor2im_not_list(image_tensor, imtype, normalize):
    """Convert a single torch tensor to a numpy image.

    :param image_tensor: <torch.Tensor> (C, H, W) image tensor -- assumed 3D; TODO confirm
    :param imtype: <np.dtype> output dtype (e.g. np.uint8)
    :param normalize: <boolean> True when tensor values are in [-1, 1]
    :return: <np.ndarray> (H, W) or (H, W, C) image of dtype *imtype*
    """
    image_numpy = image_tensor.cpu().float().numpy()
    image_numpy = tensor2im_normalize(image_numpy, normalize)
    image_numpy = np.clip(image_numpy, 0, 255)
    # Collapse 1-channel (or >3-channel) images to 2D by keeping channel 0.
    if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3:
        image_numpy = image_numpy[:, :, 0]
    return image_numpy.astype(imtype)
def tesor2im_list(image_tensor, imtype, normalize):
    """Convert a list of tensors by applying tensor2im to each element.

    NOTE(review): the function name is misspelled ("tesor" for "tensor");
    renaming would break the call site in tensor2im, so it is only flagged.

    :param image_tensor: <list> list of torch tensors
    :param imtype: <np.dtype> output dtype
    :param normalize: <boolean> True when tensor values are in [-1, 1]
    :return: <list> converted numpy images
    """
    image_numpy = []
    for i in image_tensor:
        image_numpy.append(tensor2im(i, imtype, normalize))
    return image_numpy
def tensor2im_normalize(image_numpy, normalize):
    """Convert a CHW float array to HWC scaled to the 0-255 range.

    :param image_numpy: <np.ndarray> (C, H, W) float image
    :param normalize: <boolean> True when values are in [-1, 1], False when in [0, 1]
    :return: <np.ndarray> (H, W, C) image scaled to 0-255
    """
    hwc = np.transpose(image_numpy, (1, 2, 0))
    if normalize:
        return (hwc + 1) / 2.0 * 255.0
    return hwc * 255.0

42
transform/gan/mask.py

@ -3,10 +3,8 @@ from transform.gan import ImageTransformGAN @@ -3,10 +3,8 @@ from transform.gan import ImageTransformGAN
from config import Config as Conf
class CorrectToMask(ImageTransformGAN):
"""Correct -> Mask [GAN]."""
def __init__(self, input_index=(-1,), args=None):
class MaskImageTransformGAN(ImageTransformGAN):
def __init__(self, mask_name, input_index=(-1,), args=None):
"""
Correct To Mask constructor.
@ -14,14 +12,27 @@ class CorrectToMask(ImageTransformGAN): @@ -14,14 +12,27 @@ class CorrectToMask(ImageTransformGAN):
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__(
(args if args is not None else Conf.args)['checkpoints']["correct_to_mask"],
"correct_to_mask",
(args if args is not None else Conf.args)['checkpoints'][mask_name],
mask_name,
input_index=input_index,
args=args
)
class MaskrefToMaskdet(ImageTransformGAN):
class CorrectToMask(MaskImageTransformGAN):
"""Correct -> Mask [GAN]."""
def __init__(self, input_index=(-1,), args=None):
"""
Correct To Mask constructor.
:param input_index: <tuple> index where to take the inputs (default is (-1) for previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__("correct_to_mask", input_index=input_index, args=args)
class MaskrefToMaskdet(MaskImageTransformGAN):
"""Maskref -> Maskdet [GAN]."""
def __init__(self, input_index=(-1,), args=None):
@ -31,15 +42,10 @@ class MaskrefToMaskdet(ImageTransformGAN): @@ -31,15 +42,10 @@ class MaskrefToMaskdet(ImageTransformGAN):
:param input_index: <tuple> index where to take the inputs (default is (-1) for previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__(
(args if args is not None else Conf.args)['checkpoints']["maskref_to_maskdet"],
"maskref_to_maskdet",
input_index=input_index,
args=args
)
super().__init__("maskref_to_maskdet", input_index=input_index, args=args)
class MaskfinToNude(ImageTransformGAN):
class MaskfinToNude(MaskImageTransformGAN):
"""Maskfin -> Nude [GAN]."""
def __init__(self, input_index=(-1,), args=None):
@ -49,9 +55,5 @@ class MaskfinToNude(ImageTransformGAN): @@ -49,9 +55,5 @@ class MaskfinToNude(ImageTransformGAN):
:param input_index: <tuple> index where to take the inputs (default is (-1) for previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__(
(args if args is not None else Conf.args)['checkpoints']["maskfin_to_nude"],
"maskfin_to_nude",
input_index=input_index,
args=args
)
super().__init__("maskfin_to_nude", input_index=input_index, args=args)

94
transform/opencv/bodypart/__init__.py

@ -4,29 +4,95 @@ @@ -4,29 +4,95 @@
class BodyPart:
"""Body part annotation."""
def __init__(self, name, xmin, ymin, xmax, ymax, x, y, w, h):
def __init__(self, name, bounding_box, center, dimension):
"""
Body Part constructor.
:param name: <string>
:param xmin: <int>
:param ymin: <int>
:param bounding_box: <int>
:param center: <int>
:param xmax: <int>
:param ymax: <int>
:param x: <int>
:param y: <int>
:param w: <int>
:param h: <int>
"""
self.name = name
# Bounding Box:
self.bounding_box = bounding_box
# Center:
self.center = center
# Dimension:
self.dimension = dimension
@staticmethod
def add_body_part_to_list(name, bounding_box, center, dimension, l):
l.append(
BodyPart(name, bounding_box, center, dimension)
)
@property
def xmin(self):
return self.bounding_box.xmin
@property
def ymin(self):
return self.bounding_box.ymin
@property
def xmax(self):
return self.bounding_box.xmax
@property
def ymax(self):
return self.bounding_box.ymax
@property
def x(self):
return self.center.x
@property
def y(self):
return self.center.y
@property
def w(self):
return self.dimension.w
@property
def h(self):
return self.dimension.h
class Dimension:
    """Dimension: width/height pair of a detected body part."""

    def __init__(self, w, h):
        """Dimension Constructor.

        :param w: <int> width
        :param h: <int> height
        """
        self.w = w
        self.h = h
class Center:
    """Center: (x, y) center point of a detected body part."""

    def __init__(self, x, y):
        """Center Constructor.

        :param x: <int> center x coordinate
        :param y: <int> center y coordinate
        """
        self.x = x
        self.y = y
class BoundingBox:
"""BoundingBox."""
def __init__(self, xmin, ymin, xmax, ymax):
"""BoundingBox Constructor"""
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
# Center:
self.x = x
self.y = y
# Dimensione:
self.w = w
self.h = h
@staticmethod
def calculate_bounding_box(h, w, x, y):
"""Calculate Bounding Box."""
xmin = int(x - (w / 2))
xmax = int(x + (w / 2))
ymin = int(y - (h / 2))
ymax = int(y + (h / 2))
return xmax, xmin, ymax, ymin

159
transform/opencv/bodypart/extract.py

@ -2,7 +2,7 @@ @@ -2,7 +2,7 @@
import cv2
import numpy as np
from transform.opencv.bodypart import BodyPart
from transform.opencv.bodypart import BodyPart, Dimension, BoundingBox, Center
from transform.opencv.bodypart.inferrer import infer_nip, infer_hair
from transform.opencv.bodypart.resolver import detect_tit_aur_missing_problem, resolve_tit_aur_missing_problems
@ -22,10 +22,10 @@ def extract_annotations(maskdet, enable_pubes): @@ -22,10 +22,10 @@ def extract_annotations(maskdet, enable_pubes):
belly_list = find_body_part(maskdet, "belly")
# Filter out parts basing on dimension (area and aspect ratio):
aur_list = filter_dim_parts(aur_list, 100, 1000, 0.5, 3)
tits_list = filter_dim_parts(tits_list, 1000, 60000, 0.2, 3)
vag_list = filter_dim_parts(vag_list, 10, 1000, 0.2, 3)
belly_list = filter_dim_parts(belly_list, 10, 1000, 0.2, 3)
aur_list = filter_dim_parts(aur_list, (100, 1000), (0.5, 3))
tits_list = filter_dim_parts(tits_list, (1000, 60000), (0.2, 3))
vag_list = filter_dim_parts(vag_list, (10, 1000), (0.2, 3))
belly_list = filter_dim_parts(belly_list, (10, 1000), (0.2, 3))
# Filter couple (if parts are > 2, choose only 2)
aur_list = filter_couple(aur_list)
@ -56,62 +56,6 @@ def find_body_part(image, part_name): @@ -56,62 +56,6 @@ def find_body_part(image, part_name):
:param part_name: <string> part_name
:return: <BodyPart[]>list
"""
def calculate_bounding_box(h, w, x, y):
"""Calculate Bounding Box."""
xmin = int(x - (w / 2))
xmax = int(x + (w / 2))
ymin = int(y - (h / 2))
ymax = int(y + (h / 2))
return xmax, xmin, ymax, ymin
def detect_direction(a_max, a_min, angle):
"""Detect direction."""
if angle == 0:
h = a_max
w = a_min
else:
h = a_min
w = a_max
return h, w
def normalize_belly_vag(h, part_name, w):
"""Normalize the belly and vag size."""
if part_name in ("belly", "vag"):
if w < 15:
w *= 2
if h < 15:
h *= 2
return h, w
def get_correct_filter_color(image, part_name):
"""Get the correct color filter."""
if part_name == "tit":
# Use combined color filter
f1 = np.asarray([0, 0, 0]) # tit color filter
f2 = np.asarray([10, 10, 10])
f3 = np.asarray([0, 0, 250]) # aur color filter
f4 = np.asarray([0, 0, 255])
color_mask1 = cv2.inRange(image, f1, f2)
color_mask2 = cv2.inRange(image, f3, f4)
color_mask = cv2.bitwise_or(color_mask1, color_mask2) # combine
elif part_name == "aur":
f1 = np.asarray([0, 0, 250]) # aur color filter
f2 = np.asarray([0, 0, 255])
color_mask = cv2.inRange(image, f1, f2)
elif part_name == "vag":
f1 = np.asarray([250, 0, 0]) # vag filter
f2 = np.asarray([255, 0, 0])
color_mask = cv2.inRange(image, f1, f2)
elif part_name == "belly":
f1 = np.asarray([250, 0, 250]) # belly filter
f2 = np.asarray([255, 0, 255])
color_mask = cv2.inRange(image, f1, f2)
return color_mask
bodypart_list = [] # empty BodyPart list
color_mask = get_correct_filter_color(image, part_name)
@ -138,28 +82,79 @@ def find_body_part(image, part_name): @@ -138,28 +82,79 @@ def find_body_part(image, part_name):
h, w = normalize_belly_vag(h, part_name, w)
xmax, xmin, ymax, ymin = calculate_bounding_box(h, w, x, y)
bodypart_list.append(BodyPart(part_name, xmin, ymin, xmax, ymax, x, y, w, h))
xmax, xmin, ymax, ymin = BoundingBox.calculate_bounding_box(h, w, x, y)
bodypart_list.append(
BodyPart(part_name, BoundingBox(xmin, ymin, xmax, ymax), Center(x, y), Dimension(w, h))
)
return bodypart_list
def filter_dim_parts(bp_list, min_area, max_area, min_ar, max_ar):
def detect_direction(a_max, a_min, angle):
    """Choose (height, width) from the ellipse axes given its rotation.

    :param a_max: <num> major axis length
    :param a_min: <num> minor axis length
    :param angle: <num> ellipse rotation angle
    :return: <(num, num)> (height, width) — major axis is the height only
        when the ellipse is unrotated
    """
    return (a_max, a_min) if angle == 0 else (a_min, a_max)
def normalize_belly_vag(h, part_name, w):
    """Double any belly/vag dimension below the 15 px minimum.

    :param h: <num> part height
    :param part_name: <string> body part name
    :param w: <num> part width
    :return: <(num, num)> (height, width)
    """
    if part_name in ("belly", "vag"):
        w = w * 2 if w < 15 else w
        h = h * 2 if h < 15 else h
    return h, w
def get_correct_filter_color(image, part_name):
    """Get the correct color filter mask for *part_name*.

    :param image: <RGB> BGR detection image
    :param part_name: <string> one of "tit", "aur", "vag", "belly"
    :return: <RGB> binary mask (255 inside the part's color band)
    :raises ValueError: for an unknown part name
    """
    def get_simple_mask(image, l1, l2):
        # 255 where every channel lies within [l1, l2].
        f1 = np.asarray(l1)
        f2 = np.asarray(l2)
        return cv2.inRange(image, f1, f2)

    if part_name == "tit":
        # Combined filter: the near-black tit band OR'd with the aur band.
        color_mask = cv2.bitwise_or(
            get_simple_mask(image, [0, 0, 0], [10, 10, 10]),
            get_simple_mask(image, [0, 0, 250], [0, 0, 255]),
        )
    elif part_name == "aur":
        # BUGFIX: upper bound was [0, 0, 250] (== lower bound), collapsing
        # the band to a single value; restore the original 255 ceiling.
        color_mask = get_simple_mask(image, [0, 0, 250], [0, 0, 255])
    elif part_name == "vag":
        # BUGFIX: upper bound was [250, 0, 0] (== lower bound); restore 255.
        color_mask = get_simple_mask(image, [250, 0, 0], [255, 0, 0])
    elif part_name == "belly":
        color_mask = get_simple_mask(image, [250, 0, 250], [255, 0, 255])
    else:
        # Previously fell through to `return color_mask` -> UnboundLocalError.
        raise ValueError("no color filter defined for body part {!r}".format(part_name))
    return color_mask
def filter_dim_parts(bp_list, min_max_area, min_max_ar):
    """
    Filter a body part list by area and aspect ratio.

    :param bp_list: <BodyPart[]> list
    :param min_max_area: <(num, num)> (minimum, maximum) allowed part area
    :param min_max_ar: <(num, num)> (minimum, maximum) allowed aspect ratio (w / h)
    :return: <BodyPart[]> parts whose area and ratio are strictly inside the bounds
    """
    min_area, max_area = min_max_area
    min_ar, max_ar = min_max_ar
    return [
        part
        for part in bp_list
        if min_area < part.w * part.h < max_area
        and min_ar < part.w / part.h < max_ar
    ]
@ -167,7 +162,7 @@ def filter_dim_parts(bp_list, min_area, max_area, min_ar, max_ar): @@ -167,7 +162,7 @@ def filter_dim_parts(bp_list, min_area, max_area, min_ar, max_ar):
def filter_couple(bp_list):
"""
Filer couple in body part list.
Filter couple in body part list.
:param bp_list: <BodyPart[]>list
:return: <BodyPart[]>list
@ -180,15 +175,7 @@ def filter_couple(bp_list): @@ -180,15 +175,7 @@ def filter_couple(bp_list):
min_b = 1
min_diff = abs(bp_list[min_a].y - bp_list[min_b].y)
for a, _ in enumerate(bp_list):
for b, _ in enumerate(bp_list):
# TODO: avoid repetition (1,0) (0,1)
if a != b:
diff = abs(bp_list[a].y - bp_list[b].y)
if diff < min_diff:
min_diff = diff
min_a = a
min_b = b
min_a, min_b = find_min(bp_list, min_a, min_b, min_diff)
b_filt = [bp_list[min_a], bp_list[min_b]]
@ -196,3 +183,15 @@ def filter_couple(bp_list): @@ -196,3 +183,15 @@ def filter_couple(bp_list):
else:
# No change
return bp_list
def find_min(bp_list, min_a, min_b, min_diff):
    """Return the indices of the pair of parts with the closest y values.

    Starts from the candidate pair (min_a, min_b) at vertical distance
    min_diff and keeps it unless a strictly closer pair exists.

    :param bp_list: <BodyPart[]> list
    :param min_a: <int> index of the first candidate part
    :param min_b: <int> index of the second candidate part
    :param min_diff: <num> |y_a - y_b| of the candidate pair
    :return: <(int, int)> indices of the closest pair
    """
    # Resolved TODO: |y_a - y_b| is symmetric, so scan each unordered pair
    # once (b > a) instead of visiting both (a, b) and (b, a). Strict `<`
    # keeps the winning pair identical to the old double scan.
    for a in range(len(bp_list)):
        for b in range(a + 1, len(bp_list)):
            diff = abs(bp_list[a].y - bp_list[b].y)
            if diff < min_diff:
                min_diff = diff
                min_a, min_b = a, b
    return min_a, min_b

22
transform/opencv/bodypart/inferrer.py

@ -1,7 +1,7 @@ @@ -1,7 +1,7 @@
"""Inference Body part functions."""
import random
from transform.opencv.bodypart import BodyPart
from transform.opencv.bodypart import BodyPart, BoundingBox, Dimension, Center
def infer_nip(aur_list):
@ -25,12 +25,12 @@ def infer_nip(aur_list): @@ -25,12 +25,12 @@ def infer_nip(aur_list):
y = aur.y
# Calculate Bounding Box:
xmin = int(x - (nip_dim / 2))
xmax = int(x + (nip_dim / 2))
ymin = int(y - (nip_dim / 2))
ymax = int(y + (nip_dim / 2))
xmax, xmin, ymax, ymin = BoundingBox.calculate_bounding_box(nip_dim, nip_dim, x, y)
nip_list.append(BodyPart("nip", xmin, ymin, xmax, ymax, x, y, nip_dim, nip_dim))
nip_list.append(
BodyPart("nip", BoundingBox(xmin, ymin, xmax, ymax), Center(x, y), Dimension(nip_dim, nip_dim)
)
)
return nip_list
@ -55,12 +55,10 @@ def infer_hair(vag_list, enable): @@ -55,12 +55,10 @@ def infer_hair(vag_list, enable):
x = vag.x
y = vag.y - (hair_h / 2) - (vag.h / 2)
# Calculate Bounding Box:
xmin = int(x - (hair_w / 2))
xmax = int(x + (hair_w / 2))
ymin = int(y - (hair_h / 2))
ymax = int(y + (hair_h / 2))
xmax, xmin, ymax, ymin = BoundingBox.calculate_bounding_box(hair_h,hair_w, x, y)
hair_list.append(BodyPart("hair", xmin, ymin, xmax, ymax, x, y, hair_w, hair_h))
hair_list.append(
BodyPart("nip", BoundingBox(xmin, ymin, xmax, ymax), Center(x, y), Dimension(hair_w, hair_h))
)
return hair_list

94
transform/opencv/bodypart/resolver.py

@ -1,7 +1,7 @@ @@ -1,7 +1,7 @@
"""Inference Body problems resolver."""
import random
from transform.opencv.bodypart import BodyPart
from transform.opencv.bodypart import BodyPart, BoundingBox, Center, Dimension
def detect_tit_aur_missing_problem(tits_list, aur_list):
@ -44,6 +44,21 @@ def resolve_tit_aur_missing_problems(tits_list, aur_list, problem_code): @@ -44,6 +44,21 @@ def resolve_tit_aur_missing_problems(tits_list, aur_list, problem_code):
:param problem_code: <int> problem code
:return: None
"""
def find_l2_width_is_full(l1, l2):
    """Return the (x, y) of the l2 part on the side where l1 is missing.

    Compares the lone l1 part against both l2 parts: the l2 part lying
    farther from l1[0] marks the empty side, and its center is returned.

    :param l1: <BodyPart[]> list with a single detected part
    :param l2: <BodyPart[]> list with two detected parts
    :return: <(num, num)> (x, y) center of the chosen l2 part
    """
    anchor = l1[0].x
    # l2[0] wins when it is the farther of the two from the anchor
    # (i.e. l1[0]'s own side is already covered by l2[1]).
    chosen = l2[0] if abs(anchor - l2[0].x) > abs(anchor - l2[1].x) else l2[1]
    return chosen.x, chosen.y
def resolve_problem_3():
random_tit_factor = random.randint(2, 5) # TOTEST
@ -52,46 +67,33 @@ def resolve_tit_aur_missing_problems(tits_list, aur_list, problem_code): @@ -52,46 +67,33 @@ def resolve_tit_aur_missing_problems(tits_list, aur_list, problem_code):
new_x = aur_list[0].x
new_y = aur_list[0].y
xmin = int(new_x - (new_w / 2))
xmax = int(new_x + (new_w / 2))
ymin = int(new_y - (new_w / 2))
ymax = int(new_y + (new_w / 2))
xmax, xmin, ymax, ymin = BoundingBox.calculate_bounding_box(new_w, new_w, new_x, new_y)
tits_list.append(BodyPart("tit", xmin, ymin, xmax, ymax, new_x, new_y, new_w, new_w))
BodyPart.add_body_part_to_list("tit", BoundingBox(xmin, ymin, xmax, ymax), Center(new_x, new_y),
Dimension(new_w, new_w), tits_list)
# Add the second tit: