Browse Source

Merge pull request #49 from PommeDroid/utimate_refactoring

Ultimate Refactoring
tags/v1.2.10
deeppppp 2 years ago
parent
commit
6fd8afecbb
No account linked to committer's email address

+ 2
- 1
argv/__init__.py View File

@@ -8,7 +8,8 @@ from argv.checkpoints import init_checkpoints_sub_parser, check_args_checkpoints
from argv.common import arg_help, arg_debug, arg_version
from argv.daemon import init_daemon_sub_parser
from argv.gpu_info import init_gpu_info_sub_parser
from argv.run import init_run_parser, check_args_run_parser, set_args_run_parser
from argv.run import init_run_parser
from argv.run.config import set_args_run_parser, check_args_run_parser
from utils import setup_log, json_to_argv



+ 49
- 0
argv/run/__init__.py View File

@@ -0,0 +1,49 @@
import main
from argv.checkpoints import arg_checkpoints
from argv.common import arg_debug, arg_help, arg_version
from argv.run.argument import arg_altered, arg_auto_rescale, arg_auto_resize, arg_auto_resize_crop, arg_color_transfer, \
arg_cpu, arg_gpu, arg_ignore_size, arg_input, arg_json_args, arg_json_folder_name, arg_n_core, arg_n_run, \
arg_output, arg_overlay, arg_preferences, arg_step


def init_run_parser(subparsers):
    """Register the 'run' sub-command and all of its CLI arguments.

    :param subparsers: <argparse._SubParsersAction> sub-parser registry of the main parser
    :return: None
    """
    run_parser = subparsers.add_parser(
        'run',
        description="Process image(s) with dreampower.",
        help="Process image(s) with dreampower.",
        add_help=False
    )
    run_parser.set_defaults(func=main.main)

    # conflicts handler: options registered on these groups are mutually exclusive
    processing_mod = run_parser.add_mutually_exclusive_group()
    scale_mod = run_parser.add_mutually_exclusive_group()

    # I/O arguments
    for register in (arg_input, arg_output):
        register(run_parser)

    # scaling modes (mutually exclusive)
    for register in (arg_auto_rescale, arg_auto_resize, arg_auto_resize_crop, arg_overlay, arg_ignore_size):
        register(scale_mod)

    # transformation options
    for register in (arg_color_transfer, arg_preferences, arg_n_run, arg_step, arg_altered):
        register(run_parser)

    # device selection (mutually exclusive)
    for register in (arg_cpu, arg_gpu):
        register(processing_mod)

    # runtime configuration
    for register in (arg_checkpoints, arg_n_core, arg_json_args, arg_json_folder_name):
        register(run_parser)

    # common flags
    for register in (arg_help, arg_debug, arg_version):
        register(run_parser)

argv/run.py → argv/run/argument.py View File

@@ -1,77 +1,7 @@
import json
import os
import re
from json import JSONDecodeError

import gpu_info
import main
from argv.checkpoints import check_arg_checkpoints, set_arg_checkpoints, arg_checkpoints
from argv.common import arg_debug, arg_help, arg_version
from utils import check_image_file_validity, cv2_supported_extension


def init_run_parser(subparsers):
run_parser = subparsers.add_parser(
'run',
description="Process image(s) with dreampower.",
help="Process image(s) with dreampower.",
add_help=False
)
run_parser.set_defaults(func=main.main)

# conflicts handler
processing_mod = run_parser.add_mutually_exclusive_group()
scale_mod = run_parser.add_mutually_exclusive_group()

# add run arguments
arg_input(run_parser)
arg_output(run_parser)

arg_auto_rescale(scale_mod)
arg_auto_resize(scale_mod)
arg_auto_resize_crop(scale_mod)
arg_overlay(scale_mod)
arg_ignore_size(scale_mod)

arg_color_transfer(run_parser)

arg_preferences(run_parser)
arg_n_run(run_parser)
arg_step(run_parser)
arg_altered(run_parser)

arg_cpu(processing_mod)
arg_gpu(processing_mod)
arg_checkpoints(run_parser)
arg_n_core(run_parser)

arg_json_args(run_parser)
arg_json_folder_name(run_parser)

arg_help(run_parser)
arg_debug(run_parser)
arg_version(run_parser)


def set_args_run_parser(args):
set_arg_checkpoints(args)
set_arg_preference(args)
set_gpu_ids(args)


def check_args_run_parser(parser, args):
check_arg_input(parser, args)
check_arg_output(parser, args)
check_args_altered(parser, args)
check_arg_checkpoints(parser, args)


def check_args_altered(parser, args):
if args.steps and not args.altered:
parser.error("--steps requires --altered.")
elif args.steps and args.altered:
if not os.path.isdir(args.altered):
parser.error("{} directory doesn't exist.".format(args.altered))
from utils import load_json


def arg_altered(parser):
@@ -114,15 +44,6 @@ def arg_color_transfer(parser):
)


def set_gpu_ids(args):
if args.cpu:
args.gpu_ids = None
elif args.gpu:
args.gpu_ids = args.gpu
else:
args.gpu_ids = None if not gpu_info.get_info()['has_cuda'] else [0]


def arg_cpu(parser):
parser.add_argument(
"--cpu",
@@ -158,32 +79,15 @@ def arg_input(parser):
)


def check_arg_input(parser, args):
if not args.input:
parser.error("-i, --input INPUT is required.")
if not os.path.isdir(args.input) and not os.path.isfile(args.input):
parser.ArgumentTypeError("Input {} file or directory doesn't exist.".format(args.input))
elif os.path.isfile(args.input) and os.path.splitext(args.input)[1] not in cv2_supported_extension() + [".gif"]:
parser.ArgumentTypeError("Input {} file not supported format.".format(args.input))
if os.path.isfile(args.input):
check_image_file_validity(args.input)
return args.input


def arg_json_args(parser):
def check_json_args_file():
def type_func(a):
try:
if os.path.isfile(a):
with open(a, 'r') as f:
j = json.load(f)
else:
j = json.loads(str(a))
j = load_json(a)
except JSONDecodeError:
raise parser.error(
"Arguments json {} is not in valid JSON format.".format(a))
return j

return type_func

parser.add_argument(
@@ -231,15 +135,6 @@ def arg_output(parser):
)


def check_arg_output(parser, args):
if os.path.isfile(args.input) and not args.output:
_, extension = os.path.splitext(args.input)
args.output = "output{}".format(extension)
elif args.output and os.path.isfile(args.input) and os.path.splitext(args.output)[1] \
not in cv2_supported_extension() + [".gif"]:
parser.error("Output {} file not a supported format.".format(args.output))


def arg_overlay(parser):
def check_crops_coord():
def type_func(a):
@@ -259,16 +154,6 @@ def arg_overlay(parser):
)


def set_arg_preference(args):
args.prefs = {
"titsize": args.bsize,
"aursize": args.asize,
"nipsize": args.nsize,
"vagsize": args.vsize,
"hairsize": args.hsize
}


def arg_preferences(parser):
parser.add_argument(
"--bsize",

+ 65
- 0
argv/run/config.py View File

@@ -0,0 +1,65 @@
import os

import gpu_info
from argv.checkpoints import set_arg_checkpoints, check_arg_checkpoints
from utils import check_image_file_validity, is_a_supported_image_file_extension


def set_args_run_parser(args):
    """Derive the computed run settings (checkpoints, preferences, GPU ids) from parsed args.

    :param args: <Namespace> parsed run arguments (mutated in place)
    :return: None
    """
    for setter in (set_arg_checkpoints, set_arg_preference, set_gpu_ids):
        setter(args)


def check_args_run_parser(parser, args):
    """Run every validation rule for the 'run' sub-command arguments.

    :param parser: <ArgumentParser> parser used to report validation errors
    :param args: <Namespace> parsed run arguments
    :return: None
    """
    for check in (check_arg_input, check_arg_output, check_args_altered, check_arg_checkpoints):
        check(parser, args)


def check_args_altered(parser, args):
    """Validate the --steps/--altered combination.

    --steps needs --altered, and --altered must point at an existing directory.

    :param parser: <ArgumentParser> parser used to report validation errors
    :param args: <Namespace> parsed run arguments
    :return: None
    """
    if not args.steps:
        return
    if not args.altered:
        parser.error("--steps requires --altered.")
    elif not os.path.isdir(args.altered):
        parser.error("{} directory doesn't exist.".format(args.altered))


def set_gpu_ids(args):
    """Resolve which GPU ids the run should use.

    --cpu forces CPU-only (None); an explicit --gpu list wins; otherwise
    GPU 0 is used when CUDA is available, CPU otherwise.

    :param args: <Namespace> parsed run arguments (mutated in place)
    :return: None
    """
    if args.cpu:
        args.gpu_ids = None
        return
    if args.gpu:
        args.gpu_ids = args.gpu
        return
    args.gpu_ids = [0] if gpu_info.get_info()['has_cuda'] else None


def check_arg_input(parser, args):
    """Validate the --input argument.

    Input is required, must exist, and when it is a single file it must be
    a supported, readable image.

    :param parser: <ArgumentParser> parser used to report validation errors
    :param args: <Namespace> parsed run arguments
    :return: <string> the validated input path
    """
    if not args.input:
        parser.error("-i, --input INPUT is required.")
    input_is_file = os.path.isfile(args.input)
    if not input_is_file and not os.path.isdir(args.input):
        parser.error("Input {} file or directory doesn't exist.".format(args.input))
    elif input_is_file and not is_a_supported_image_file_extension(args.input):
        parser.error("Input {} file not supported format.".format(args.input))
    if input_is_file:
        check_image_file_validity(args.input)
    return args.input


def check_arg_output(parser, args):
    """Validate or derive the --output argument.

    For a single input file with no explicit output, default the output name
    to "output" with the input's extension; an explicit output for a file
    input must be a supported image format.

    :param parser: <ArgumentParser> parser used to report validation errors
    :param args: <Namespace> parsed run arguments (output may be set in place)
    :return: None
    """
    input_is_file = os.path.isfile(args.input)
    if input_is_file and not args.output:
        extension = os.path.splitext(args.input)[1]
        args.output = "output{}".format(extension)
    elif args.output and input_is_file and not is_a_supported_image_file_extension(args.output):
        parser.error("Output {} file not a supported format.".format(args.output))


def set_arg_preference(args):
    """Collect the per-body-part size options into the args.prefs dict used by the transforms.

    :param args: <Namespace> parsed run arguments (prefs is set in place)
    :return: None
    """
    size_options = (
        ("titsize", args.bsize),
        ("aursize", args.asize),
        ("nipsize", args.nsize),
        ("vagsize", args.vsize),
        ("hairsize", args.hsize),
    )
    args.prefs = dict(size_options)

+ 24
- 76
processing/__init__.py View File

@@ -3,40 +3,38 @@ import os
import time

from config import Config as Conf
from transform.gan.mask import CorrectToMask, MaskrefToMaskdet, MaskfinToNude
from transform.opencv.correct import DressToCorrect, ColorTransfer
from transform.opencv.mask import MaskToMaskref, MaskdetToMaskfin
from transform.opencv.resize import ImageToResized, ImageToCrop, ImageToOverlay, ImageToResizedCrop, ImageToRescale
from utils import camel_case_to_str, cv2_supported_extension, check_shape
from utils import camel_case_to_str, cv2_supported_extension


class Processing:
"""Abstract Process Class."""

def __init__(self, args=None):
"""
Process Constructor.
Image Transformation Class Constructor.

:param input_index: <tuple> index where to take the inputs (default is (-1) for previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
self.__start = time.time()
self._args = Conf.args.copy() if args is None else args.copy()

def run(self):
def run(self, *args):
"""
Run the process.
Run the Image Transform.

:return: None
:param args: <dict> settings for the transformation
:return: <RGB> image
"""
self.__start = time.time()
self._info_start_run()
self._setup()
self._execute()
self._clean()
self._setup(*args)
r = self._execute(*args)
self._clean(*args)
self._info_end_run()
return r

def _info_start_run(self):
"""
Log info when the process run begin.
Log info at the start of the run.

:return: None
"""
@@ -45,36 +43,39 @@ class Processing:

def _info_end_run(self):
"""
Log info when the process run end.
Log info at the end of the run.

:return: None
"""
Conf.log.info("{} Finish".format(camel_case_to_str(self.__class__.__name__)))
Conf.log.debug("{} Done in {} seconds".format(
camel_case_to_str(self.__class__.__name__), round(time.time() - self.__start, 2)))

def _setup(self):
def _setup(self, *args):
"""
Configure the process to be ready to execute.
Configure the transformation.

:param args: <dict> settings for the transformation
:return: None
"""
pass

def _execute(self):
def _execute(self, *args):
"""
Execute the process.
Execute the transformation.

:param args: <dict> settings for the transformation
:return: None
"""
pass

def _clean(self):
def _clean(self, *args):
"""
Cleanup a process execution.
Clean the transformation.

:param args: <dict> settings for the transformation
:return: None
"""
pass


class SimpleProcessing(Processing):
@@ -105,56 +106,3 @@ class SimpleProcessing(Processing):
return ImageProcessing(args=args)
else:
return None


def select_phases(args):
"""
Select the transformation phases to use following args parameters.

:return: <ImageTransform[]> list of image transformation
"""
def shift_step(shift_starting=0, shift_ending=0):
if not args['steps']:
args['steps'] = (0, 5)
args['steps'] = (
args['steps'][0] + shift_starting,
args['steps'][1] + shift_ending
)

def add_tail(phases, phase):
phases = [phase] + phases
if args['steps'] and args['steps'][0] != 0:
shift_step(shift_starting=1)
if args['steps'] and args['steps'][1] == len(phases) - 1:
shift_step(shift_ending=1)
return phases

def add_head(phases, phase):
phases = phases + [phase]
if args['steps'] and args['steps'][1] == len(phases) - 1:
shift_step(shift_ending=1)
return phases

phases = [DressToCorrect, CorrectToMask, MaskToMaskref,
MaskrefToMaskdet, MaskdetToMaskfin, MaskfinToNude]
Conf.log.debug(args)
if args['overlay']:
phases = add_tail(phases, ImageToResized)
phases = add_tail(phases, ImageToCrop)
phases = add_head(phases, ImageToOverlay)
elif args['auto_resize']:
phases = add_tail(phases, ImageToResized)
elif args['auto_resize_crop']:
phases = add_tail(phases, ImageToResizedCrop)
elif args['auto_rescale']:
phases = add_tail(phases, ImageToRescale)
elif os.path.isfile(args['input']):
if not args['ignore_size']:
check_shape(args['input'])
else:
Conf.log.warn('Image Size Requirements Unchecked.')

if args['color_transfer']:
phases = add_head(phases, ColorTransfer)

return phases

+ 4
- 6
processing/folder.py View File

@@ -7,9 +7,9 @@ import sys
from json import JSONDecodeError

from config import Config as Conf
from processing import select_phases
from processing.multiple import MultipleImageProcessing
from utils import cv2_supported_extension
from processing.utils import select_phases
from utils import is_a_supported_image_file_extension


class FolderImageProcessing(MultipleImageProcessing):
@@ -32,8 +32,7 @@ class FolderImageProcessing(MultipleImageProcessing):
for r, _, _ in os.walk(self.__input_folder_path):
args = copy.deepcopy(self._args)
args['input'] = [
x.path for x in os.scandir(r)
if x.is_file() and os.path.splitext(x.path)[1] in cv2_supported_extension() + [".gif"]
x.path for x in os.scandir(r) if x.is_file() and is_a_supported_image_file_extension(x.path)
]
args['phases'] = select_phases(self._args)
args['output'] = [
@@ -48,8 +47,7 @@ class FolderImageProcessing(MultipleImageProcessing):
pathlib.Path(*pathlib.Path(r).parts[1:]),
os.path.basename(x.path)
)
for x in os.scandir(r)
if x.is_file() and os.path.splitext(x.path)[1] in cv2_supported_extension() + [".gif"]
for x in args['input']
]

self._process_list.append(

+ 5
- 4
processing/gif.py View File

@@ -7,7 +7,8 @@ import cv2
import imageio

from config import Config as Conf
from processing import Processing, select_phases
from processing import Processing
from processing.utils import select_phases
from processing.multiple_image import MultipleImageProcessing
from utils import write_image

@@ -29,7 +30,7 @@ class GifProcessing(Processing):
self.__temp_input_paths = []
self.__temp_output_paths = []

def _setup(self):
def _setup(self, *args):
self.__tmp_dir = tempfile.mkdtemp()
Conf.log.debug("Temporay dir is {}".format(self.__tmp_dir))
imgs = imageio.mimread(self.__input_path)
@@ -43,7 +44,7 @@ class GifProcessing(Processing):
for i in zip(imgs, self.__temp_input_paths):
write_image(cv2.cvtColor(i[0], cv2.COLOR_RGB2BGR), i[1])

def _execute(self):
def _execute(self, *args):
"""
Execute all phases on each frames of the gif and recreate the gif.

@@ -58,5 +59,5 @@ class GifProcessing(Processing):

Conf.log.info("{} Gif Created ".format(self.__output_path))

def _clean(self):
def _clean(self, *args):
shutil.rmtree(self.__tmp_dir)

+ 4
- 3
processing/image.py View File

@@ -3,7 +3,8 @@ import os
import sys

from config import Config as Conf
from processing import Processing, select_phases
from processing import Processing
from processing.utils import select_phases
from utils import read_image, camel_case_to_str, write_image


@@ -40,7 +41,7 @@ class ImageProcessing(Processing):
super()._info_start_run()
Conf.log.info("Processing on {}".format(str(self.__image_steps)[2:-2]))

def _setup(self):
def _setup(self, *args):
try:
self.__image_steps = [read_image(x) if isinstance(x, str) else x for x in self.__image_steps]
except FileNotFoundError as e:
@@ -51,7 +52,7 @@ class ImageProcessing(Processing):
"directory path that contains valid images.")
sys.exit(1)

def _execute(self):
def _execute(self, *args):
"""
Execute all phases on the image.


+ 2
- 2
processing/multiple.py View File

@@ -24,7 +24,7 @@ class MultipleImageProcessing(Processing):
self.__multiprocessing = Conf.multiprocessing()
self.__children_process = children_process

def _setup(self):
def _setup(self, *args):
self._process_list = []

for input_path, output_path in zip(self._input_paths, self._output_paths):
@@ -33,7 +33,7 @@ class MultipleImageProcessing(Processing):
args['output'] = output_path
self._process_list.append(self.__children_process(args=args))

def _execute(self):
def _execute(self, *args):
"""
Execute all phases on the list of images.


+ 93
- 0
processing/utils.py View File

@@ -0,0 +1,93 @@
import os

from config import Config as Conf
from transform.gan.mask import CorrectToMask, MaskrefToMaskdet, MaskfinToNude
from transform.opencv.correct import DressToCorrect, ColorTransfer
from transform.opencv.mask import MaskToMaskref, MaskdetToMaskfin
from transform.opencv.resize import ImageToResized, ImageToCrop, ImageToOverlay, ImageToResizedCrop, ImageToRescale
from utils import check_shape


def shift_step(args, shift_start_add=0, shift_end_add=0):
    """Shift the (start, end) steps window stored in args.

    When no --steps were given, the full (0, 5) range is assumed before shifting.

    :param args: <dict> run arguments (args['steps'] is replaced in place)
    :param shift_start_add: <int> amount added to the starting step
    :param shift_end_add: <int> amount added to the ending step
    :return: None
    """
    start, end = args['steps'] or (0, 5)
    args['steps'] = (start + shift_start_add, end + shift_end_add)


def shift_starting(args):
    """Shift the steps window start by one when the run does not begin at step 0.

    Called after a phase is prepended, so user-selected step indexes keep
    pointing at the same phases.

    :param args: <dict> run arguments
    :return: None
    """
    steps = args['steps']
    if steps and steps[0] != 0:
        shift_step(args, shift_start_add=1)


def shift_ending(args, p):
    """Shift the steps window end by one when it points at the last phase of p.

    :param args: <dict> run arguments
    :param p: <ImageTransform[]> current phase pipeline
    :return: None
    """
    steps = args['steps']
    if steps and steps[1] == len(p) - 1:
        shift_step(args, shift_end_add=1)


def add_tail(args, p, add):
    """Prepend a phase to the pipeline and keep the --steps window aligned.

    :param args: <dict> run arguments
    :param p: <ImageTransform[]> current phase pipeline
    :param add: <ImageTransform> phase class to prepend
    :return: <ImageTransform[]> extended pipeline
    """
    extended = [add] + p
    shift_starting(args)
    shift_ending(args, extended)
    return extended


def add_head(args, p, add):
    """Append a phase to the end of the pipeline and keep the --steps window aligned.

    :param args: <dict> run arguments
    :param p: <ImageTransform[]> current phase pipeline
    :param add: <ImageTransform> phase class to append
    :return: <ImageTransform[]> extended pipeline
    """
    extended = p + [add]
    shift_ending(args, extended)
    return extended


def overlay(args, p):
    """Extend the pipeline for --overlay mode: resize and crop first, overlay last.

    NOTE: the function name doubles as the args key checked by scale_mod.

    :param args: <dict> run arguments
    :param p: <ImageTransform[]> current phase pipeline
    :return: <ImageTransform[]> extended pipeline
    """
    pipeline = add_tail(args, p, ImageToResized)
    pipeline = add_tail(args, pipeline, ImageToCrop)
    return add_head(args, pipeline, ImageToOverlay)


def auto_resize(args, p):
    """Prepend the resize phase for --auto-resize mode.

    NOTE: the function name doubles as the args key checked by scale_mod.

    :param args: <dict> run arguments
    :param p: <ImageTransform[]> current phase pipeline
    :return: <ImageTransform[]> extended pipeline
    """
    pipeline = add_tail(args, p, ImageToResized)
    return pipeline


def auto_resize_crop(args, p):
    """Prepend the resize-and-crop phase for --auto-resize-crop mode.

    NOTE: the function name doubles as the args key checked by scale_mod.

    :param args: <dict> run arguments
    :param p: <ImageTransform[]> current phase pipeline
    :return: <ImageTransform[]> extended pipeline
    """
    pipeline = add_tail(args, p, ImageToResizedCrop)
    return pipeline


def auto_rescale(args, p):
    """Prepend the rescale phase for --auto-rescale mode.

    NOTE: the function name doubles as the args key checked by scale_mod.

    :param args: <dict> run arguments
    :param p: <ImageTransform[]> current phase pipeline
    :return: <ImageTransform[]> extended pipeline
    """
    pipeline = add_tail(args, p, ImageToRescale)
    return pipeline


def is_file(args):
    """Enforce the input image size requirements unless --ignore-size was passed.

    NOTE(review): despite the name, this validates the image shape of
    args['input'] rather than testing file-ness.

    :param args: <dict> run arguments
    :return: None
    """
    if args['ignore_size']:
        Conf.log.warn('Image Size Requirements Unchecked.')
    else:
        check_shape(args['input'])


def scale_mod(args, p):
    """Apply the selected scaling mode to the phase pipeline.

    Each candidate mode function is keyed by its own name in args
    (e.g. args['overlay']); the first enabled mode rewrites the pipeline.
    When no mode is selected and the input is a single file, the image size
    requirements are checked instead.

    :param args: <dict> run arguments
    :param p: <ImageTransform[]> current phase pipeline
    :return: <ImageTransform[]> possibly extended pipeline
    """
    for mod in (overlay, auto_resize, auto_resize_crop, auto_rescale):
        if args[mod.__name__]:
            return mod(args, p)
    # Use the per-run args, not the global Conf.args: folder processing
    # deep-copies args for each image, so the global input may differ.
    if os.path.isfile(args['input']):
        is_file(args)
    return p


def select_phases(args):
    """
    Select the transformation phases to use following args parameters.

    :param args: <dict> run arguments
    :return: <ImageTransform[]> list of image transformation
    """
    pipeline = [DressToCorrect, CorrectToMask, MaskToMaskref,
                MaskrefToMaskdet, MaskdetToMaskfin, MaskfinToNude]

    # Scaling mode first (it may prepend/append resize phases)...
    pipeline = scale_mod(args, pipeline)

    # ...then the optional color transfer as the final phase.
    if args['color_transfer']:
        pipeline = add_head(args, pipeline, ColorTransfer)

    return pipeline

+ 4
- 62
transform/__init__.py View File

@@ -2,10 +2,10 @@
import time

from config import Config as Conf
from utils import camel_case_to_str
from processing import Processing


class ImageTransform:
class ImageTransform(Processing):
"""Abstract Image Transformation Class."""

def __init__(self, input_index=(-1,), args=None):
@@ -15,66 +15,8 @@ class ImageTransform:
:param input_index: <tuple> index where to take the inputs (default is (-1) for previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""

super().__init__(args)
self.__start = time.time()
self.input_index = input_index
self._args = Conf.args.copy() if args is None else args.copy()

def run(self, *args):
"""
Run the Image Transform.

:param args: <dict> settings for the transformation
:return: <RGB> image
"""
self.__start = time.time()
self.info_start_run()
self._setup(*args)
r = self._execute(*args)
self._clean(*args)
self.info_end_run()
return r

def info_start_run(self):
"""
Log info at the start of the run.

:return: None
"""
self.__start = time.time()
Conf.log.info("Executing {}".format(camel_case_to_str(self.__class__.__name__)))

def info_end_run(self):
"""
Log info at the end of the run.

:return: None
"""
Conf.log.debug("{} Done in {} seconds".format(
camel_case_to_str(self.__class__.__name__), round(time.time() - self.__start, 2)))

def _setup(self, *args):
"""
Configure the transformation.

:param args: <dict> settings for the transformation
:return: None
"""
pass

def _execute(self, *args):
"""
Execute the transformation.

:param args: <dict> settings for the transformation
:return: None
"""
pass

def _clean(self, *args):
"""
Clean the transformation.

:param args: <dict> settings for the transformation
:return: None
"""
pass

+ 34
- 22
transform/gan/generator.py View File

@@ -134,14 +134,7 @@ class ResnetBlock(torch.nn.Module):
):
conv_block = []
p = 0
if padding_type == "reflect":
conv_block += [torch.nn.ReflectionPad2d(1)]
elif padding_type == "replicate":
conv_block += [torch.nn.ReplicationPad2d(1)]
elif padding_type == "zero":
p = 1
else:
raise NotImplementedError("padding [%s] is not implemented" % padding_type)
conv_block, p = ResnetBlock.__increment_padding_conv_block(conv_block, p, padding_type)

conv_block += [
torch.nn.Conv2d(dim, dim, kernel_size=3, padding=p),
@@ -152,6 +145,16 @@ class ResnetBlock(torch.nn.Module):
conv_block += [torch.nn.Dropout(0.5)]

p = 0
conv_block, p = ResnetBlock.__increment_padding_conv_block(conv_block, p, padding_type)
conv_block += [
torch.nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim),
]

return torch.nn.Sequential(*conv_block)

@staticmethod
def __increment_padding_conv_block(conv_block, p, padding_type):
if padding_type == "reflect":
conv_block += [torch.nn.ReflectionPad2d(1)]
elif padding_type == "replicate":
@@ -160,12 +163,7 @@ class ResnetBlock(torch.nn.Module):
p = 1
else:
raise NotImplementedError("padding [%s] is not implemented" % padding_type)
conv_block += [
torch.nn.Conv2d(dim, dim, kernel_size=3, padding=p),
norm_layer(dim),
]

return torch.nn.Sequential(*conv_block)
return conv_block, p

def forward(self, x):
"""
@@ -230,16 +228,30 @@ def tensor2im(image_tensor, imtype=np.uint8, normalize=True):
:return:
"""
if isinstance(image_tensor, list):
image_numpy = []
for i in image_tensor:
image_numpy.append(tensor2im(i, imtype, normalize))
return image_numpy
image_numpy = image_tensor.cpu().float().numpy()
if normalize:
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
return tesor2im_list(image_tensor, imtype, normalize)
else:
image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
return tensor2im_not_list(image_tensor, imtype, normalize)


def tensor2im_not_list(image_tensor, imtype, normalize):
    """Convert a single image tensor to a numpy image array.

    :param image_tensor: <torch.Tensor> CHW image tensor
    :param imtype: <np.dtype> output dtype (e.g. np.uint8)
    :param normalize: <bool> True when tensor values are in [-1, 1]
    :return: <np.ndarray> HWC image (HW when single-channel or >3 channels)
    """
    arr = image_tensor.cpu().float().numpy()
    arr = np.clip(tensor2im_normalize(arr, normalize), 0, 255)
    # Drop the channel axis for grayscale or >3-channel outputs.
    channels = arr.shape[2]
    if channels == 1 or channels > 3:
        arr = arr[:, :, 0]
    return arr.astype(imtype)


def tesor2im_list(image_tensor, imtype, normalize):
    """Convert each tensor of a list through tensor2im.

    NOTE(review): the 'tesor' typo in the name is kept intentionally —
    tensor2im dispatches to this exact name.

    :param image_tensor: <list> list of image tensors
    :param imtype: <np.dtype> output dtype
    :param normalize: <bool> True when tensor values are in [-1, 1]
    :return: <list> converted images
    """
    return [tensor2im(entry, imtype, normalize) for entry in image_tensor]


def tensor2im_normalize(image_numpy, normalize):
    """Transpose a CHW array to HWC and scale it to the 0-255 range.

    :param image_numpy: <np.ndarray> CHW image data
    :param normalize: <bool> True when values are in [-1, 1] (GAN output), False when in [0, 1]
    :return: <np.ndarray> HWC image scaled to [0, 255]
    """
    hwc = np.transpose(image_numpy, (1, 2, 0))
    if normalize:
        return (hwc + 1) / 2.0 * 255.0
    return hwc * 255.0

+ 22
- 20
transform/gan/mask.py View File

@@ -3,10 +3,8 @@ from transform.gan import ImageTransformGAN
from config import Config as Conf


class CorrectToMask(ImageTransformGAN):
"""Correct -> Mask [GAN]."""

def __init__(self, input_index=(-1,), args=None):
class MaskImageTransformGAN(ImageTransformGAN):
def __init__(self, mask_name, input_index=(-1,), args=None):
"""
Correct To Mask constructor.

@@ -14,14 +12,27 @@ class CorrectToMask(ImageTransformGAN):
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__(
(args if args is not None else Conf.args)['checkpoints']["correct_to_mask"],
"correct_to_mask",
(args if args is not None else Conf.args)['checkpoints'][mask_name],
mask_name,
input_index=input_index,
args=args
)


class MaskrefToMaskdet(ImageTransformGAN):
class CorrectToMask(MaskImageTransformGAN):
    """Correct -> Mask [GAN].

    GAN transformation driven by the "correct_to_mask" checkpoint.
    """

    def __init__(self, input_index=(-1,), args=None):
        """
        Correct To Mask constructor.

        :param input_index: <tuple> index where to take the inputs (default is (-1) for previous transformation)
        :param args: <dict> args parameter to run the image transformation (default use Conf.args)
        """
        super().__init__("correct_to_mask", input_index=input_index, args=args)


class MaskrefToMaskdet(MaskImageTransformGAN):
"""Maskref -> Maskdet [GAN]."""

def __init__(self, input_index=(-1,), args=None):
@@ -31,15 +42,10 @@ class MaskrefToMaskdet(ImageTransformGAN):
:param input_index: <tuple> index where to take the inputs (default is (-1) for previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__(
(args if args is not None else Conf.args)['checkpoints']["maskref_to_maskdet"],
"maskref_to_maskdet",
input_index=input_index,
args=args
)
super().__init__("maskref_to_maskdet", input_index=input_index, args=args)


class MaskfinToNude(ImageTransformGAN):
class MaskfinToNude(MaskImageTransformGAN):
"""Maskfin -> Nude [GAN]."""

def __init__(self, input_index=(-1,), args=None):
@@ -49,9 +55,5 @@ class MaskfinToNude(ImageTransformGAN):
:param input_index: <tuple> index where to take the inputs (default is (-1) for previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__(
(args if args is not None else Conf.args)['checkpoints']["maskfin_to_nude"],
"maskfin_to_nude",
input_index=input_index,
args=args
)
super().__init__("maskfin_to_nude", input_index=input_index, args=args)


+ 80
- 14
transform/opencv/bodypart/__init__.py View File

@@ -4,29 +4,95 @@
class BodyPart:
"""Body part annotation."""

def __init__(self, name, xmin, ymin, xmax, ymax, x, y, w, h):
def __init__(self, name, bounding_box, center, dimension):
"""
Body Part constructor.

:param name: <string>
:param xmin: <int>
:param ymin: <int>
:param bounding_box: <int>
:param center: <int>
:param xmax: <int>
:param ymax: <int>
:param x: <int>
:param y: <int>
:param w: <int>
:param h: <int>
"""
self.name = name
# Bounding Box:
self.bounding_box = bounding_box
# Center:
self.center = center
# Dimension:
self.dimension = dimension

@staticmethod
def add_body_part_to_list(name, bounding_box, center, dimension, l):
l.append(
BodyPart(name, bounding_box, center, dimension)
)

@property
def xmin(self):
return self.bounding_box.xmin

@property
def ymin(self):
return self.bounding_box.ymin

@property
def xmax(self):
return self.bounding_box.xmax

@property
def ymax(self):
return self.bounding_box.ymax

@property
def x(self):
return self.center.x

@property
def y(self):
return self.center.y

@property
def w(self):
return self.dimension.w

@property
def h(self):
return self.dimension.h


class Dimension:
    """Dimension.

    Width/height pair of a detected body part.
    """

    def __init__(self, w, h):
        """Dimension Constructor."""
        self.h = h
        self.w = w


class Center:
    """Center.

    (x, y) center coordinates of a detected body part.
    """

    def __init__(self, x, y):
        """Center Constructor."""
        self.y = y
        self.x = x


class BoundingBox:
"""BoundingBox."""

def __init__(self, xmin, ymin, xmax, ymax):
"""BoundingBox Constructor"""
self.xmin = xmin
self.ymin = ymin
self.xmax = xmax
self.ymax = ymax
# Center:
self.x = x
self.y = y
# Dimensione:
self.w = w
self.h = h

@staticmethod
def calculate_bounding_box(h, w, x, y):
    """Calculate Bounding Box.

    Derive the corner coordinates of a (w, h) box centered on (x, y).

    :return: <(int, int, int, int)> xmax, xmin, ymax, ymin
    """
    half_w = w / 2
    half_h = h / 2
    return int(x + half_w), int(x - half_w), int(y + half_h), int(y - half_h)

+ 79
- 80
transform/opencv/bodypart/extract.py View File

@@ -2,7 +2,7 @@
import cv2
import numpy as np

from transform.opencv.bodypart import BodyPart
from transform.opencv.bodypart import BodyPart, Dimension, BoundingBox, Center
from transform.opencv.bodypart.inferrer import infer_nip, infer_hair
from transform.opencv.bodypart.resolver import detect_tit_aur_missing_problem, resolve_tit_aur_missing_problems

@@ -22,10 +22,10 @@ def extract_annotations(maskdet, enable_pubes):
belly_list = find_body_part(maskdet, "belly")

# Filter out parts basing on dimension (area and aspect ratio):
aur_list = filter_dim_parts(aur_list, 100, 1000, 0.5, 3)
tits_list = filter_dim_parts(tits_list, 1000, 60000, 0.2, 3)
vag_list = filter_dim_parts(vag_list, 10, 1000, 0.2, 3)
belly_list = filter_dim_parts(belly_list, 10, 1000, 0.2, 3)
aur_list = filter_dim_parts(aur_list, (100, 1000), (0.5, 3))
tits_list = filter_dim_parts(tits_list, (1000, 60000), (0.2, 3))
vag_list = filter_dim_parts(vag_list, (10, 1000), (0.2, 3))
belly_list = filter_dim_parts(belly_list, (10, 1000), (0.2, 3))

# Filter couple (if parts are > 2, choose only 2)
aur_list = filter_couple(aur_list)
@@ -56,62 +56,6 @@ def find_body_part(image, part_name):
:param part_name: <string> part_name
:return: <BodyPart[]>list
"""
def calculate_bounding_box(h, w, x, y):
"""Calculate Bounding Box."""
xmin = int(x - (w / 2))
xmax = int(x + (w / 2))
ymin = int(y - (h / 2))
ymax = int(y + (h / 2))
return xmax, xmin, ymax, ymin

def detect_direction(a_max, a_min, angle):
"""Detect direction."""
if angle == 0:
h = a_max
w = a_min
else:
h = a_min
w = a_max
return h, w

def normalize_belly_vag(h, part_name, w):
"""Normalize the belly and vag size."""
if part_name in ("belly", "vag"):
if w < 15:
w *= 2
if h < 15:
h *= 2
return h, w

def get_correct_filter_color(image, part_name):
"""Get the correct color filter."""
if part_name == "tit":
# Use combined color filter
f1 = np.asarray([0, 0, 0]) # tit color filter
f2 = np.asarray([10, 10, 10])
f3 = np.asarray([0, 0, 250]) # aur color filter
f4 = np.asarray([0, 0, 255])
color_mask1 = cv2.inRange(image, f1, f2)
color_mask2 = cv2.inRange(image, f3, f4)
color_mask = cv2.bitwise_or(color_mask1, color_mask2) # combine

elif part_name == "aur":
f1 = np.asarray([0, 0, 250]) # aur color filter
f2 = np.asarray([0, 0, 255])
color_mask = cv2.inRange(image, f1, f2)

elif part_name == "vag":
f1 = np.asarray([250, 0, 0]) # vag filter
f2 = np.asarray([255, 0, 0])
color_mask = cv2.inRange(image, f1, f2)

elif part_name == "belly":
f1 = np.asarray([250, 0, 250]) # belly filter
f2 = np.asarray([255, 0, 255])
color_mask = cv2.inRange(image, f1, f2)

return color_mask

bodypart_list = [] # empty BodyPart list

color_mask = get_correct_filter_color(image, part_name)
@@ -138,28 +82,79 @@ def find_body_part(image, part_name):

h, w = normalize_belly_vag(h, part_name, w)

xmax, xmin, ymax, ymin = calculate_bounding_box(h, w, x, y)

bodypart_list.append(BodyPart(part_name, xmin, ymin, xmax, ymax, x, y, w, h))
xmax, xmin, ymax, ymin = BoundingBox.calculate_bounding_box(h, w, x, y)

bodypart_list.append(
BodyPart(part_name, BoundingBox(xmin, ymin, xmax, ymax), Center(x, y), Dimension(w, h))
)
return bodypart_list


def filter_dim_parts(bp_list, min_area, max_area, min_ar, max_ar):
def detect_direction(a_max, a_min, angle):
    """Detect direction.

    Map the ellipse axes to (height, width) based on its rotation: an angle
    of 0 means the major axis is vertical.

    :param a_max: <num> major axis length
    :param a_min: <num> minor axis length
    :param angle: <num> ellipse rotation angle
    :return: <(num, num)> (h, w)
    """
    if angle == 0:
        return a_max, a_min
    return a_min, a_max


def normalize_belly_vag(h, part_name, w):
    """Normalize the belly and vag size.

    Tiny detections (< 15 px on a side) of these two parts are doubled so
    the downstream bounding boxes remain usable.

    :param h: <num> detected height
    :param part_name: <string> body part name
    :param w: <num> detected width
    :return: <(num, num)> possibly enlarged (h, w)
    """
    if part_name in ("belly", "vag"):
        w = w * 2 if w < 15 else w
        h = h * 2 if h < 15 else h
    return h, w


def get_correct_filter_color(image, part_name):
    """Get the correct color filter.

    Each body part is encoded in the maskdet image with a distinct color
    range; return the binary mask of pixels within that part's range.

    :param image: <RGB> maskdet image
    :param part_name: <string> one of "tit", "aur", "vag", "belly"
    :return: <mask> binary color mask
    """

    def get_simple_mask(image, l1, l2):
        # Single inRange filter between the lower (l1) and upper (l2) bounds.
        f1 = np.asarray(l1)
        f2 = np.asarray(l2)
        color_mask = cv2.inRange(image, f1, f2)
        return color_mask

    if part_name == "tit":
        # Use combined color filter
        f1 = np.asarray([0, 0, 0])  # tit color filter
        f2 = np.asarray([10, 10, 10])
        f3 = np.asarray([0, 0, 250])  # aur color filter
        f4 = np.asarray([0, 0, 255])
        color_mask1 = cv2.inRange(image, f1, f2)
        color_mask2 = cv2.inRange(image, f3, f4)
        color_mask = cv2.bitwise_or(color_mask1, color_mask2)  # combine

    elif part_name == "aur":
        # Upper bound must be [0, 0, 255]: cv2.inRange is inclusive of both
        # bounds and aur pixels span 250..255 on the last channel.
        color_mask = get_simple_mask(image, [0, 0, 250], [0, 0, 255])

    elif part_name == "vag":
        # Upper bound must be [255, 0, 0]: vag pixels span 250..255 on the
        # first channel.
        color_mask = get_simple_mask(image, [250, 0, 0], [255, 0, 0])

    elif part_name == "belly":
        color_mask = get_simple_mask(image, [250, 0, 250], [255, 0, 255])

    # NOTE(review): an unknown part_name raises UnboundLocalError here
    # (same as the original behavior).
    return color_mask


def filter_dim_parts(bp_list, min_max_area, min_max_ar):
"""
Filter a body part list with area and aspect ration.

:param bp_list: BodyPart[]>list
:param min_area: <num> minimum area of part
:param max_area: <num> max area
:param min_ar: <num> min aspect ratio
:param max_ar: <num> max aspect ratio
:param min_max_area: <(num,num)> minimum,max area of part
:param min_max_ar: <(num,num)> minimum,max aspect ratio
:return: <BodyPart[]>list
"""
b_filt = []

for obj in bp_list:
if min_area < obj.w * obj.h < max_area and min_ar < obj.w / obj.h < max_ar:
if min_max_area[0] < obj.w * obj.h < min_max_area[1] and min_max_ar[0] < obj.w / obj.h < min_max_ar[1]:
b_filt.append(obj)

return b_filt
@@ -167,7 +162,7 @@ def filter_dim_parts(bp_list, min_area, max_area, min_ar, max_ar):

def filter_couple(bp_list):
"""
Filer couple in body part list.
Filter couple in body part list.

:param bp_list: <BodyPart[]>list
:return: <BodyPart[]>list
@@ -180,15 +175,7 @@ def filter_couple(bp_list):
min_b = 1
min_diff = abs(bp_list[min_a].y - bp_list[min_b].y)

for a, _ in enumerate(bp_list):
for b, _ in enumerate(bp_list):
# TODO: avoid repetition (1,0) (0,1)
if a != b:
diff = abs(bp_list[a].y - bp_list[b].y)
if diff < min_diff:
min_diff = diff
min_a = a
min_b = b
min_a, min_b = find_min(bp_list, min_a, min_b, min_diff)

b_filt = [bp_list[min_a], bp_list[min_b]]

@@ -196,3 +183,15 @@ def filter_couple(bp_list):
else:
# No change
return bp_list


def find_min(bp_list, min_a, min_b, min_diff):
    """
    Find the pair of body parts with the smallest vertical (y) distance.

    :param bp_list: <BodyPart[]> list to scan
    :param min_a: <int> index of the current best first element
    :param min_b: <int> index of the current best second element
    :param min_diff: <num> current best y-distance; only strictly smaller pairs win
    :return: <(int, int)> indices of the closest pair (initial pair kept on ties)
    """
    # Resolves the old TODO: iterate each unordered pair once instead of both
    # (a, b) and (b, a). |y_a - y_b| is symmetric and the strict < comparison
    # means the duplicate visit never changed the result.
    for a, part_a in enumerate(bp_list):
        for b in range(a + 1, len(bp_list)):
            diff = abs(part_a.y - bp_list[b].y)
            if diff < min_diff:
                min_diff = diff
                min_a = a
                min_b = b
    return min_a, min_b

+ 10
- 12
transform/opencv/bodypart/inferrer.py View File

@@ -1,7 +1,7 @@
"""Inference Body part functions."""
import random

from transform.opencv.bodypart import BodyPart
from transform.opencv.bodypart import BodyPart, BoundingBox, Dimension, Center


def infer_nip(aur_list):
@@ -25,12 +25,12 @@ def infer_nip(aur_list):
y = aur.y

# Calculate Bounding Box:
xmin = int(x - (nip_dim / 2))
xmax = int(x + (nip_dim / 2))
ymin = int(y - (nip_dim / 2))
ymax = int(y + (nip_dim / 2))
xmax, xmin, ymax, ymin = BoundingBox.calculate_bounding_box(nip_dim, nip_dim, x, y)

nip_list.append(BodyPart("nip", xmin, ymin, xmax, ymax, x, y, nip_dim, nip_dim))
nip_list.append(
BodyPart("nip", BoundingBox(xmin, ymin, xmax, ymax), Center(x, y), Dimension(nip_dim, nip_dim)
)
)

return nip_list

@@ -55,12 +55,10 @@ def infer_hair(vag_list, enable):
x = vag.x
y = vag.y - (hair_h / 2) - (vag.h / 2)

# Calculate Bounding Box:
xmin = int(x - (hair_w / 2))
xmax = int(x + (hair_w / 2))
ymin = int(y - (hair_h / 2))
ymax = int(y + (hair_h / 2))
xmax, xmin, ymax, ymin = BoundingBox.calculate_bounding_box(hair_h,hair_w, x, y)

hair_list.append(BodyPart("hair", xmin, ymin, xmax, ymax, x, y, hair_w, hair_h))
hair_list.append(
BodyPart("nip", BoundingBox(xmin, ymin, xmax, ymax), Center(x, y), Dimension(hair_w, hair_h))
)

return hair_list

+ 42
- 52
transform/opencv/bodypart/resolver.py View File

@@ -1,7 +1,7 @@
"""Inference Body problems resolver."""
import random

from transform.opencv.bodypart import BodyPart
from transform.opencv.bodypart import BodyPart, BoundingBox, Center, Dimension


def detect_tit_aur_missing_problem(tits_list, aur_list):
@@ -44,6 +44,21 @@ def resolve_tit_aur_missing_problems(tits_list, aur_list, problem_code):
:param problem_code: <int> problem code
:return: None
"""

def find_l2_width_is_full(l1, l2):
    """Return the center (x, y) of whichever element of l2 lies farther (in x) from l1[0]."""
    # NOTE(review): name presumably means "which ... is full"; the farther l2
    # candidate is treated as the one missing its counterpart in l1.
    ref_x = l1[0].x
    candidate = l2[0] if abs(ref_x - l2[0].x) > abs(ref_x - l2[1].x) else l2[1]
    return candidate.x, candidate.y

def resolve_problem_3():
random_tit_factor = random.randint(2, 5) # TOTEST

@@ -52,46 +67,33 @@ def resolve_tit_aur_missing_problems(tits_list, aur_list, problem_code):
new_x = aur_list[0].x
new_y = aur_list[0].y

xmin = int(new_x - (new_w / 2))
xmax = int(new_x + (new_w / 2))
ymin = int(new_y - (new_w / 2))
ymax = int(new_y + (new_w / 2))
xmax, xmin, ymax, ymin = BoundingBox.calculate_bounding_box(new_w, new_w, new_x, new_y)

tits_list.append(BodyPart("tit", xmin, ymin, xmax, ymax, new_x, new_y, new_w, new_w))
BodyPart.add_body_part_to_list("tit", BoundingBox(xmin, ymin, xmax, ymax), Center(new_x, new_y),
Dimension(new_w, new_w), tits_list)

# Add the second tit:
new_w = aur_list[1].w * random_tit_factor # TOTEST
new_x = aur_list[1].x
new_y = aur_list[1].y

xmin = int(new_x - (new_w / 2))
xmax = int(new_x + (new_w / 2))
ymin = int(new_y - (new_w / 2))
ymax = int(new_y + (new_w / 2))
xmax, xmin, ymax, ymin = BoundingBox.calculate_bounding_box(new_w, new_w, new_x, new_y)

tits_list.append(BodyPart("tit", xmin, ymin, xmax, ymax, new_x, new_y, new_w, new_w))
BodyPart.add_body_part_to_list("tit", BoundingBox(xmax, xmin, ymax, ymin), Center(new_x, new_y),
Dimension(new_w, new_w), tits_list)

def resolve_problem_6():
# Find which aur is full:
d1 = abs(tits_list[0].x - aur_list[0].x)
d2 = abs(tits_list[0].x - aur_list[1].x)

if d1 > d2:
# aur[0] is empty
new_x = aur_list[0].x
new_y = aur_list[0].y
else:
# aur[1] is empty
new_x = aur_list[1].x
new_y = aur_list[1].y
# Find which aur is full:
new_x, new_y = find_l2_width_is_full(tits_list, aur_list)
new_w = tits_list[0].w / 2

# Calculate Bounding Box:
xmin = int(new_x - (tits_list[0].w / 2))
xmax = int(new_x + (tits_list[0].w / 2))
ymin = int(new_y - (tits_list[0].w / 2))
ymax = int(new_y + (tits_list[0].w / 2))
xmax, xmin, ymax, ymin = BoundingBox.calculate_bounding_box(new_w, new_w, new_x, new_y)

tits_list.append(BodyPart("tit", xmin, ymin, xmax, ymax, new_x, new_y, tits_list[0].w, tits_list[0].w))
tits_list.append(
BodyPart("tit", BoundingBox(xmin, ymin, xmax, ymax), Center(new_x, new_y),
Dimension(tits_list[0].w, tits_list[0].w))
)

def resolve_problem_7():
# Add the first aur:
@@ -99,52 +101,39 @@ def resolve_tit_aur_missing_problems(tits_list, aur_list, problem_code):
new_x = tits_list[0].x
new_y = tits_list[0].y

xmin = int(new_x - (new_w / 2))
xmax = int(new_x + (new_w / 2))
ymin = int(new_y - (new_w / 2))
ymax = int(new_y + (new_w / 2))
xmax, xmin, ymax, ymin = BoundingBox.calculate_bounding_box(new_w, new_w, new_x, new_y)

aur_list.append(BodyPart("aur", xmin, ymin, xmax, ymax, new_x, new_y, new_w, new_w))
BodyPart("aur", BoundingBox(xmin, ymin, xmax, ymax), Center(new_x, new_y), Dimension(new_w, new_w))

# Add the second aur:
new_w = tits_list[1].w * random.uniform(0.03, 0.1) # TOTEST
new_x = tits_list[1].x
new_y = tits_list[1].y

xmin = int(new_x - (new_w / 2))
xmax = int(new_x + (new_w / 2))
ymin = int(new_y - (new_w / 2))
ymax = int(new_y + (new_w / 2))
xmax, xmin, ymax, ymin = BoundingBox.calculate_bounding_box(new_w, new_w, new_x, new_y)

aur_list.append(BodyPart("aur", xmin, ymin, xmax, ymax, new_x, new_y, new_w, new_w))
BodyPart.add_body_part_to_list("aur", BoundingBox(xmin, ymin, xmax, ymax), Center(new_x, new_y),
Dimension(new_w, new_w), aur_list)

def resolve_problem_8():
# Find which tit is full:
d1 = abs(aur_list[0].x - tits_list[0].x)
d2 = abs(aur_list[0].x - tits_list[1].x)

if d1 > d2:
# tit[0] is empty
new_x = tits_list[0].x
new_y = tits_list[0].y
else:
# tit[1] is empty
new_x = tits_list[1].x
new_y = tits_list[1].y
# Find which tit is full
new_x, new_y = find_l2_width_is_full(aur_list, tits_list)

# Calculate Bounding Box:
xmin = int(new_x - (aur_list[0].w / 2))
xmax = int(new_x + (aur_list[0].w / 2))
ymin = int(new_y - (aur_list[0].w / 2))
ymax = int(new_y + (aur_list[0].w / 2))
aur_list.append(BodyPart("aur", xmin, ymin, xmax, ymax, new_x, new_y, aur_list[0].w, aur_list[0].w))

BodyPart.add_body_part_to_list("aur", BoundingBox(xmin, ymin, xmax, ymax), Center(new_x, new_y),
Dimension(aur_list[0].w, aur_list[0].w), aur_list)

{
3: resolve_problem_3,
6: resolve_problem_6,
7: resolve_problem_7,
8: resolve_problem_8,
}.get(problem_code, lambda _: _)()
}.get(problem_code, lambda: None)()


def detect_tit_aur_position_problem(tits_list, aur_list):
@@ -155,6 +144,7 @@ def detect_tit_aur_position_problem(tits_list, aur_list):
:param aur_list: <BodyPart[]> aur list
:return: <Boolean>
"""

def detect_tits_too_narrow_horizontally():
diff_tits_x = abs(tits_list[0].x - tits_list[1].x)
return diff_tits_x < 40

+ 52
- 52
transform/opencv/mask.py View File

@@ -95,57 +95,6 @@ class MaskdetToMaskfin(MaskImageTransformOpenCV):
def to_int(a, b):
    """Scale b by factor a, rounding to the nearest integer."""
    return int(round(float(b) * a))

def draw_bodypart_details(bodypart_list, details, to_int):
# Draw body part in details image:
for obj in bodypart_list:

if obj.w < obj.h:
a_max = int(obj.h / 2) # asse maggiore
a_min = int(obj.w / 2) # asse minore
angle = 0 # angle
else:
a_max = int(obj.w / 2)
a_min = int(obj.h / 2)
angle = 90

x = int(obj.x)
y = int(obj.y)

aurmax = to_int(self.__aur_size, a_max)
aurmin = to_int(self.__aur_size, a_min)
nipmax = to_int(self.__nip_size, a_max)
nipmin = to_int(self.__nip_size, a_min)
titmax = to_int(self.__tit_size, a_max)
titmin = to_int(self.__tit_size, a_min)
vagmax = to_int(self.__vag_size, a_max)
vagmin = to_int(self.__vag_size, a_min)
hairmax = to_int(self.__hair_size, a_max)
hairmin = to_int(self.__hair_size, a_min)

draw_ellipse(a_max, a_min, angle, aurmax, aurmin, details, hairmax, hairmin, nipmax, nipmin, obj,
titmax, titmin, vagmax, vagmin, x, y)

def draw_ellipse(a_max, a_min, angle, aurmax, aurmin, details, hairmax, hairmin, nipmax, nipmin, obj,
titmax,
titmin, vagmax, vagmin, x, y):
# Draw ellipse
if obj.name == "tit":
cv2.ellipse(details, (x, y), (titmax, titmin), angle, 0, 360, (0, 205, 0), -1) # (0,0,0,50)
elif obj.name == "aur":
cv2.ellipse(details, (x, y), (aurmax, aurmin), angle, 0, 360, (0, 0, 255), -1) # red
elif obj.name == "nip":
cv2.ellipse(details, (x, y), (nipmax, nipmin), angle, 0, 360, (255, 255, 255), -1) # white
elif obj.name == "belly":
cv2.ellipse(details, (x, y), (a_max, a_min), angle, 0, 360, (255, 0, 255), -1) # purple
elif obj.name == "vag":
cv2.ellipse(details, (x, y), (vagmax, vagmin), angle, 0, 360, (255, 0, 0), -1) # blue
elif obj.name == "hair":
xmin = x - hairmax
ymin = y - hairmin
xmax = x + hairmax
ymax = y + hairmax
cv2.rectangle(details, (xmin, ymin), (xmax, ymax), (100, 100, 100), -1)

enable_pubes = (self.__hair_size > 0)

# Create a total green image, in which draw details ellipses
@@ -158,7 +107,7 @@ class MaskdetToMaskfin(MaskImageTransformOpenCV):
# Check if the list is not empty:
if bodypart_list:

draw_bodypart_details(bodypart_list, details, to_int)
self.__draw_bodypart_details(bodypart_list, details, to_int)

# Define the green color filter
f1 = np.asarray([0, 250, 0]) # green color filter
@@ -177,3 +126,54 @@ class MaskdetToMaskfin(MaskImageTransformOpenCV):
# Compone:
maskfin = cv2.add(res1, res2)
return maskfin

def __draw_bodypart_details(self, bodypart_list, details, to_int):
    """Draw each detected body part onto the details image via __draw_ellipse."""
    for part in bodypart_list:
        # Semi-axes of the ellipse: major is half the larger dimension
        # ("asse maggiore"), minor half the smaller; rotate 90 degrees when
        # the part is at least as wide as it is tall.
        major = int(max(part.w, part.h) / 2)
        minor = int(min(part.w, part.h) / 2)
        angle = 0 if part.w < part.h else 90

        cx = int(part.x)
        cy = int(part.y)

        self.__draw_ellipse(
            major, minor, angle,
            to_int(self.__aur_size, major), to_int(self.__aur_size, minor),
            details,
            to_int(self.__hair_size, major), to_int(self.__hair_size, minor),
            to_int(self.__nip_size, major), to_int(self.__nip_size, minor),
            part,
            to_int(self.__tit_size, major), to_int(self.__tit_size, minor),
            to_int(self.__vag_size, major), to_int(self.__vag_size, minor),
            cx, cy,
        )

@staticmethod
def __draw_ellipse(a_max, a_min, angle, aurmax, aurmin, details, hairmax, hairmin, nipmax, nipmin, obj,
                   titmax, titmin, vagmax, vagmin, x, y):
    """
    Draw one body part on the details image.

    Each part name maps to a fixed color; hair is drawn as a rectangle,
    everything else as a filled ellipse centered on (x, y).
    """
    if obj.name == "tit":
        cv2.ellipse(details, (x, y), (titmax, titmin), angle, 0, 360, (0, 205, 0), -1)  # (0,0,0,50)
    elif obj.name == "aur":
        cv2.ellipse(details, (x, y), (aurmax, aurmin), angle, 0, 360, (0, 0, 255), -1)  # red
    elif obj.name == "nip":
        cv2.ellipse(details, (x, y), (nipmax, nipmin), angle, 0, 360, (255, 255, 255), -1)  # white
    elif obj.name == "belly":
        cv2.ellipse(details, (x, y), (a_max, a_min), angle, 0, 360, (255, 0, 255), -1)  # purple
    elif obj.name == "vag":
        cv2.ellipse(details, (x, y), (vagmax, vagmin), angle, 0, 360, (255, 0, 0), -1)  # blue
    elif obj.name == "hair":
        xmin = x - hairmax
        ymin = y - hairmin
        xmax = x + hairmax
        # Fixed: ymax previously added hairmax, producing a rectangle that was
        # vertically asymmetric around y; mirror ymin by using hairmin.
        ymax = y + hairmin
        cv2.rectangle(details, (xmin, ymin), (xmax, ymax), (100, 100, 100), -1)

+ 14
- 27
transform/opencv/resize.py View File

@@ -37,7 +37,7 @@ class ImageToCrop(ImageTransformOpenCV):
return args[0][self.__y1:self.__y2, self.__x1:self.__x2]


class ImageToOverlay(ImageTransformOpenCV):
class ImageToOverlay(ImageToCrop):
"""Image -> Overlay [OPENCV]."""

def __init__(self, input_index=(0, -1), args=None):
@@ -48,11 +48,7 @@ class ImageToOverlay(ImageTransformOpenCV):
and previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__(input_index=input_index, args=args,)
self.__x1 = self._args['overlay'][0]
self.__y1 = self._args['overlay'][1]
self.__x2 = self._args['overlay'][2]
self.__y2 = self._args['overlay'][3]
super().__init__(input_index=input_index, args=args, )

def _execute(self, *args):
"""
@@ -81,18 +77,20 @@ class ImageToResized(ImageTransformOpenCV):
"""Image -> Resized [OPENCV]."""

def _execute(self, *args):
"""
Resize an image.
new_size = self._calculate_new_size(args[0])
img = cv2.resize(args[0], (new_size[1], new_size[0]))
return self._make_new_image(img, new_size)

:param args: <[RGB]> image to resize
:return: <RGB> image
"""
old_size = args[0].shape[:2]
@staticmethod
def _calculate_new_size(img):
    """
    Compute the size that scales img so its longest side equals Conf.desired_size.

    :param img: <RGB> image to measure
    :return: <(int, int)> new (height, width)
    """
    # Removed a stray `img = cv2.resize(args[0], ...)` line left over from the
    # pre-refactor implementation — it referenced the undefined name `args`;
    # the actual resize happens in _execute.
    old_size = img.shape[:2]
    ratio = float(Conf.desired_size) / max(old_size)
    return tuple(int(dim * ratio) for dim in old_size)

@staticmethod
def _make_new_image(img, new_size):
delta_w = Conf.desired_size - new_size[1]
delta_h = Conf.desired_size - new_size[0]
top, bottom = delta_h // 2, delta_h - (delta_h // 2)
@@ -101,22 +99,11 @@ class ImageToResized(ImageTransformOpenCV):
return cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[255, 255, 255])


class ImageToResizedCrop(ImageTransformOpenCV):
class ImageToResizedCrop(ImageToResized):
"""Image -> Resized Crop [OPENCV]."""

def _execute(self, *args):
"""
Resize and crop an image.

:param args: <[RGB]> image to resize and crop
:return: <RGB> image
"""
old_size = args[0].shape[:2]
ratio = float(Conf.desired_size) / min(old_size)
new_size = tuple([int(x * ratio) for x in old_size])

img = cv2.resize(args[0], (new_size[1], new_size[0]))

@staticmethod
def _make_new_image(img, new_size):
delta_w = new_size[1] - Conf.desired_size
delta_h = new_size[0] - Conf.desired_size
top = delta_h // 2

+ 28
- 2
utils.py View File

@@ -1,4 +1,5 @@
"""Utilities functions."""
import json
import logging
import os
import sys
@@ -125,6 +126,21 @@ def cv2_supported_extension():
".pbm", ".pgm", "ppm", ".sr", ".ras", ".tiff", ".tif"]


def load_json(a):
    """
    Load json from a file or a string.

    :param a: <string> path of the file to load, or a json string
    :return: <dict> parsed json structure
    """
    if os.path.isfile(a):
        # The argument points at an existing file: parse its contents.
        with open(a, 'r') as f:
            return json.load(f)
    # Otherwise parse the argument itself as a json document.
    return json.loads(str(a))


def json_to_argv(data):
"""
Json to args parameters.
@@ -169,7 +185,7 @@ def dl_file(url, file_path):
f.write(data)
done = int(50 * dl / total_length)
print("[{}{}]".format('=' * done, ' ' * (50 - done)), end="\r")
print(" "*80, end="\r")
print(" " * 80, end="\r")
return file_path


@@ -194,4 +210,14 @@ def unzip(zip_path, extract_path):
print("[{}{}]".format('=' * done, ' ' * (50 - done)), end="\r")
zf.extract(file, path=extract_path)
extracted_size += file.file_size
print(" "*80, end="\r")
print(" " * 80, end="\r")


def is_a_supported_image_file_extension(path):
    """
    Return True if the file has a supported image extension.

    :param path: <string> path of the file to check
    :return: <boolean> True if the extension is supported
    """
    # Supported set is cv2_supported_extension() (defined above) plus ".gif".
    # NOTE(review): comparison is case-sensitive, so ".JPG" is rejected — confirm intended.
    return os.path.splitext(path)[1] in cv2_supported_extension() + [".gif"]

Loading…
Cancel
Save