Browse Source

Rework Transform Instantiation

Transforms are now instantiated only once (on first use). This modification
allows the GAN models to be loaded into memory a single time, giving a huge gain on
repeated computations (tested on sample_dir: from 162 seconds down to 92 seconds (AMD FX feels bad)).
This modification breaks CPU multiprocessing support until a multiprocessing manager
is implemented to avoid race conditions. Also added --disable-persistent-gan to
disable the in-memory persistent GAN for low-memory environments.
tags/v1.2.10
PommeDroid 10 months ago
parent
commit
b466619096

+ 4
- 3
argv/run/__init__.py View File

@@ -2,8 +2,8 @@ import main
from argv.checkpoints import arg_checkpoints
from argv.common import arg_debug, arg_help, arg_version
from argv.run.argument import arg_altered, arg_auto_rescale, arg_auto_resize, arg_auto_resize_crop, arg_color_transfer,\
arg_cpu, arg_gpu, arg_ignore_size, arg_input, arg_json_args, arg_json_folder_name, arg_n_core, arg_n_run, \
arg_output, arg_overlay, arg_preferences, arg_step
arg_cpu, arg_gpu, arg_ignore_size, arg_input, arg_json_args, arg_json_folder_name, arg_n_run, \
arg_output, arg_overlay, arg_preferences, arg_step, arg_gan_persistent


def init_run_parser(subparsers):
@@ -39,7 +39,8 @@ def init_run_parser(subparsers):
arg_cpu(processing_mod)
arg_gpu(processing_mod)
arg_checkpoints(run_parser)
arg_n_core(run_parser)
# arg_n_core(run_parser) TODO Broken Fix this
arg_gan_persistent(run_parser)

arg_json_args(run_parser)
arg_json_folder_name(run_parser)

+ 9
- 0
argv/run/argument.py View File

@@ -63,6 +63,15 @@ def arg_gpu(parser):
)


def arg_gan_persistent(parser):
parser.add_argument(
"--disable-persistent-gan",
action="store_true",
help="Disable persistent in memory gan model."
"Reduce memory usage but increase computation time on multiple processing."
)


def arg_ignore_size(parser):
parser.add_argument(
"--ignore-size",

+ 2
- 1
config.py View File

@@ -60,4 +60,5 @@ class Config:

:return: <boolean> True is multiprocessing can be use
"""
return Config.args['gpu_ids'] is None and Config.args['n_cores'] > 1
# return Config.args['gpu_ids'] is None and Config.args['n_cores'] > 1
return False # TODO Fix multiprocessing

+ 4
- 13
processing/__init__.py View File

@@ -8,17 +8,7 @@ from utils import camel_case_to_str, cv2_supported_extension

class Processing:
""" Abstract Processing Class """
def __init__(self, args=None):
"""
Image Processing Class Constructor.

:param input_index: <tuple> index where to take the inputs (default is (-1) for previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
self.__start = time.time()
self._args = Conf.args.copy() if args is None else args.copy()

def run(self, *args):
def run(self, *args, config=None):
"""
Run the Image Transform.

@@ -26,6 +16,7 @@ class Processing:
:return: <RGB> image
"""
self.__start = time.time()
self._args = Conf.args.copy() if config is None else config.copy()
self._info_start_run()
self._setup(*args)
r = self._execute(*args)
@@ -101,9 +92,9 @@ class SimpleProcessing(Processing):

if os.path.splitext(args['input'])[1] == ".gif":
from processing.gif import GifProcessing
return GifProcessing(args=args)
return GifProcessing()
elif os.path.splitext(args['input'])[1] in cv2_supported_extension():
from processing.image import ImageProcessing
return ImageProcessing(args=args)
return ImageProcessing()
else:
return None

+ 7
- 17
processing/folder.py View File

@@ -14,22 +14,14 @@ from utils import is_a_supported_image_file_extension

class FolderImageProcessing(MultipleImageProcessing):
"""Folder Image Processing Class."""

def __init__(self, args=None):
"""
Folder Image Transform Constructor.

:param args: <dict> args parameter to run images transformations (default use Conf.args
"""
super().__init__(args=args)
self.__input_folder_path = self._args['input']
self.__output_folder_path = self._args['output']
self.__multiprocessing = Conf.multiprocessing()

def _setup(self, *args):
Conf.log.debug([(r, d, f) for r, d, f in os.walk(self.__input_folder_path)])
self._input_folder_path = self._args['input']
self._output_folder_path = self._args['output']
self._multiprocessing = Conf.multiprocessing()
self._process_list = []
Conf.log.debug([(r, d, f) for r, d, f in os.walk(self._input_folder_path)])

for r, _, _ in os.walk(self.__input_folder_path):
for r, _, _ in os.walk(self._input_folder_path):
args = copy.deepcopy(self._args)
args['input'] = [
x.path for x in os.scandir(r) if x.is_file() and is_a_supported_image_file_extension(x.path)
@@ -51,9 +43,7 @@ class FolderImageProcessing(MultipleImageProcessing):
]

self._process_list.append(
MultipleImageProcessing(
args=self.__get_folder_args(args, r)
)
(MultipleImageProcessing(), self.__get_folder_args(args, r))
)

@staticmethod

+ 2
- 11
processing/gif.py View File

@@ -15,22 +15,13 @@ from utils import write_image

class GifProcessing(Processing):
"""GIF Image Processing Class."""

def __init__(self, args=None):
"""
Image Transform GIF Constructor.

:param args: <dict> args parameter to run images transformations (default use Conf.args)
"""
super().__init__(args=args)
def _setup(self, *args):
self.__phases = select_phases(self._args)
self.__input_path = args['input']
self.__output_path = args['output']
self.__tmp_dir = None
self.__temp_input_paths = []
self.__temp_output_paths = []

def _setup(self, *args):
self.__tmp_dir = tempfile.mkdtemp()
Conf.log.debug("Temporay dir is {}".format(self.__tmp_dir))
imgs = imageio.mimread(self.__input_path)
@@ -51,7 +42,7 @@ class GifProcessing(Processing):

:return: None
"""
MultipleImageProcessing(args=self._args).run()
MultipleImageProcessing().run(config=self._args)

dir_out = os.path.dirname(self.__output_path)
if dir_out != '':

+ 6
- 11
processing/image.py View File

@@ -5,20 +5,19 @@ import sys
from config import Config as Conf
from processing import Processing
from processing.utils import select_phases
from processing.worker import get_worker
from utils import camel_case_to_str, write_image
from loader import Loader


class ImageProcessing(Processing):
"""Image Processing Class."""

def __init__(self, args=None):
def _setup(self, *args):
"""
Process Image Constructor.

:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__(args=args)
self.__phases = select_phases(self._args)
self.__input_path = self._args['input']
self.__output_path = self._args['output']
@@ -36,16 +35,12 @@ class ImageProcessing(Processing):
os.path.join(path, "{}.png".format(p().__class__.__name__))
for p in self.__phases[:self.__starting_step]
]
Conf.log.debug(self.__image_steps)

def _info_start_run(self):
super()._info_start_run()
Conf.log.info("Processing on {}".format(str(self.__image_steps)[2:-2]))
Conf.log.debug(self.__image_steps)

def _setup(self, *args):
try:
self.__image_steps = [
(Loader.get_loader(x)).run(x) if isinstance(x, str) else x for x in self.__image_steps
(Loader.get_loader(x)).load(x) if isinstance(x, str) else x for x in self.__image_steps
]
except FileNotFoundError as e:
Conf.log.error(e)
@@ -61,8 +56,8 @@ class ImageProcessing(Processing):

:return: None
"""
for p in (x(args=self._args) for x in self.__phases[self.__starting_step:self.__ending_step]):
r = p.run(*[self.__image_steps[i] for i in p.input_index])
for p in (get_worker(x) for x in self.__phases[self.__starting_step:self.__ending_step]):
r = p.run(*[self.__image_steps[i] for i in p.input_index], config=self._args)
self.__image_steps.append(r)

if self.__altered_path:

+ 14
- 22
processing/multiple.py View File

@@ -9,29 +9,26 @@ from utils import camel_case_to_str

class MultipleImageProcessing(Processing):
"""Multiple Image Processing Class."""

def __init__(self, args=None, children_process=SimpleProcessing):
"""
Process Multiple Images Constructor.

:param children_process: <ImageTransform> Process to use on the list of input
:param args: args: <dict> args parameter to run images transformations (default use Conf.args)
"""
super().__init__(args=args)
def _setup(self, *args):
self._input_paths = self._args['input']
self._output_paths = self._args['output']
self._process_list = []
self.__multiprocessing = Conf.multiprocessing()
self.__children_process = children_process
self._multiprocessing = Conf.multiprocessing()

def _setup(self, *args):
self._process_list = []

for input_path, output_path in zip(self._input_paths, self._output_paths):
args = copy.deepcopy(self._args)
args['input'] = input_path
args['output'] = output_path
self._process_list.append(self.__children_process(args=args))
self._process_list.append((SimpleProcessing(args), args))
Conf.log.debug(self._process_list)

def _process_one(self, a):
Conf.log.info("{} : {}/{}".format(
camel_case_to_str(self.__class__.__name__), a[1] + 1, len(self._process_list)
))
a[0][0].run(config=a[0][1])

def _execute(self, *args):
"""
@@ -39,18 +36,13 @@ class MultipleImageProcessing(Processing):

:return: None
"""
def process_one_image(a):
Conf.log.info("{} : {}/{}".format(
camel_case_to_str(self.__class__.__name__), a[1] + 1, len(self._process_list)
))
a[0].run()

if not self.__multiprocessing:
if not self._multiprocessing:
for x in zip(self._process_list, range(len(self._process_list))):
process_one_image(x)
self._process_one(x)

else:
Conf.log.debug("Using Multiprocessing")
pool = ThreadPool(Conf.args['n_cores'])
pool.map(process_one_image, zip(self._process_list, range(len(self._process_list))))
pool.map(self._process_one, zip(self._process_list, range(len(self._process_list))))
pool.close()
pool.join()

+ 9
- 13
processing/utils.py View File

@@ -1,10 +1,6 @@
import os

from config import Config as Conf
from transform.gan.mask import CorrectToMask, MaskrefToMaskdet, MaskfinToNude
from transform.opencv.correct import DressToCorrect, ColorTransfer
from transform.opencv.mask import MaskToMaskref, MaskdetToMaskfin
from transform.opencv.resize import ImageToResized, ImageToCrop, ImageToOverlay, ImageToResizedCrop, ImageToRescale
from utils import check_shape


@@ -41,22 +37,22 @@ def add_head(args, p, add):


def overlay(args, p):
p = add_tail(args, p, ImageToResized)
p = add_tail(args, p, ImageToCrop)
p = add_head(args, p, ImageToOverlay)
p = add_tail(args, p, "ImageToResized")
p = add_tail(args, p, "ImageToCrop")
p = add_head(args, p, "ImageToOverlay")
return p


def auto_resize(args, p):
return add_tail(args, p, ImageToResized)
return add_tail(args, p, "ImageToResized")


def auto_resize_crop(args, p):
return add_tail(args, p, ImageToResizedCrop)
return add_tail(args, p, "ImageToResizedCrop")


def auto_rescale(args, p):
return add_tail(args, p, ImageToRescale)
return add_tail(args, p, "ImageToRescale")


def is_file(args):
@@ -82,12 +78,12 @@ def select_phases(args):
:return: <ImageTransform[]> list of image transformation
"""

phases = [DressToCorrect, CorrectToMask, MaskToMaskref,
MaskrefToMaskdet, MaskdetToMaskfin, MaskfinToNude]
phases = ["DressToCorrect", "CorrectToMask", "MaskToMaskref",
"MaskrefToMaskdet", "MaskdetToMaskfin", "MaskfinToNude"]

phases = scale_mod(args, phases)

if args['color_transfer']:
phases = add_head(args, phases, ColorTransfer)
phases = add_head(args, phases, "ColorTransfer")

return phases

+ 31
- 0
processing/worker.py View File

@@ -0,0 +1,31 @@
""" Wokers definition """
# TODO Implement this with a queue and mutliprocessing
import inspect

from transform.gan.mask import CorrectToMask, MaskrefToMaskdet, MaskfinToNude
from transform.opencv.correct import DressToCorrect, ColorTransfer
from transform.opencv.mask import MaskToMaskref, MaskdetToMaskfin
from transform.opencv.resize import ImageToResized, ImageToCrop, ImageToOverlay, ImageToResizedCrop, ImageToRescale

workers = {
"DressToCorrect": DressToCorrect,
"CorrectToMask": CorrectToMask,
"MaskToMaskref": MaskToMaskref,
"MaskrefToMaskdet": MaskrefToMaskdet,
"MaskdetToMaskfin": MaskdetToMaskfin,
"MaskfinToNude": MaskfinToNude,
"ImageToResized": ImageToResized,
"ImageToCrop": ImageToCrop,
"ImageToOverlay": ImageToOverlay,
"ImageToResizedCrop": ImageToResizedCrop,
"ImageToRescale": ImageToRescale,
"ColorTransfer": ColorTransfer
}


def get_worker(name):
w = workers.get(name)
if inspect.isclass(w):
w = w()
workers[name] = w
return w

+ 2
- 7
transform/__init__.py View File

@@ -1,14 +1,11 @@
"""Images Transforms."""
import time

from config import Config as Conf
from processing import Processing


class ImageTransform(Processing):
"""Abstract Image Transformation Class."""

def __init__(self, input_index=(-1,), args=None):
def __init__(self, input_index=(-1,)):
"""
Image Transformation Class Constructor.

@@ -16,7 +13,5 @@ class ImageTransform(Processing):
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""

super().__init__(args)
self.__start = time.time()
super().__init__()
self.input_index = input_index
self._args = Conf.args.copy() if args is None else args.copy()

+ 39
- 18
transform/gan/__init__.py View File

@@ -1,6 +1,8 @@
"""GAN Transforms."""
import time

import cv2
import torch

from config import Config as Conf
from transform import ImageTransform
@@ -11,7 +13,7 @@ from transform.gan.model import DeepModel, DataLoader
class ImageTransformGAN(ImageTransform):
"""Abstract GAN Image Transformation Class."""

def __init__(self, checkpoint, phase, input_index=(-1,), args=None):
def __init__(self, checkpoint, phase, input_index=(-1,)):
"""
Abstract GAN Image Transformation Class Constructor.

@@ -20,32 +22,39 @@ class ImageTransformGAN(ImageTransform):
:param input_index: <tuple> index where to take the inputs (default is (-1) for previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__(input_index=input_index, args=args)
self.__checkpoint = checkpoint
self.__phase = phase
self.__gpu_ids = self._args["gpu_ids"]
super().__init__(input_index=input_index)

self._checkpoint = checkpoint
self._phase = phase
self._persistent = not Conf.args["disable_persistent_gan"]
self._gpu_ids = Conf.args["gpu_ids"]

if self._persistent:
self.__init_model()

def __init_model(self):
start = time.time()
Conf.log.debug("Loading Model Start")
Conf.log.info("Loading GAN Model For {}".format(self._phase))
# Create Model
self.__model = DeepModel()
self.__model.initialize(Conf(), self._gpu_ids, self._checkpoint)
Conf.log.debug("Model load done in {} seconds".format(round(time.time() - start, 2)))

def _setup(self, *args):
"""
Load Dataset and Model fot the image.
Load Dataset and Model for the image.

:param args: <[RGB]> image to be transform
:return: None
"""
if self.__gpu_ids:
Conf.log.debug("GAN Processing Using GPU IDs: {}".format(self.__gpu_ids))
if self._gpu_ids:
Conf.log.debug("GAN Processing will use GPU IDs: {}".format(self._gpu_ids))
else:
Conf.log.debug("GAN Processing Using CPU")

c = Conf()

# Load custom phase options:
data_loader = DataLoader(c, args[0])
self.__dataset = data_loader.load_data()
Conf.log.debug("GAN Processing will use CPU")

# Create Model
self.__model = DeepModel()
self.__model.initialize(c, self.__gpu_ids, self.__checkpoint)
if not self._persistent:
self.__init_model()

def _execute(self, *args):
"""
@@ -54,9 +63,21 @@ class ImageTransformGAN(ImageTransform):
:param *args: <[RGB]> image to transform
:return: <RGB> image transformed
"""
c = Conf()

# Load custom phase options:
data_loader = DataLoader(c, args[0])
self.__dataset = data_loader.load_data()

mask = None
for data in self.__dataset:
generated = self.__model.inference(data["label"], data["inst"])
im = tensor2im(generated.data[0])
mask = cv2.cvtColor(im, cv2.COLOR_RGB2BGR)
return mask

def _clean(self, *args):
if not self._persistent:
del self.__dataset
del self.__model
torch.cuda.empty_cache()

+ 9
- 9
transform/gan/mask.py View File

@@ -4,7 +4,7 @@ from config import Config as Conf


class MaskImageTransformGAN(ImageTransformGAN):
def __init__(self, mask_name, input_index=(-1,), args=None):
def __init__(self, mask_name, input_index=(-1,)):
"""
Correct To Mask constructor.

@@ -12,47 +12,47 @@ class MaskImageTransformGAN(ImageTransformGAN):
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__(
(args if args is not None else Conf.args)['checkpoints'][mask_name],
Conf.args['checkpoints'][mask_name],
mask_name,
input_index=input_index,
args=args
)


class CorrectToMask(MaskImageTransformGAN):
"""Correct -> Mask [GAN]."""

def __init__(self, input_index=(-1,), args=None):
def __init__(self, input_index=(-1,)):
"""
Correct To Mask constructor.

:param input_index: <tuple> index where to take the inputs (default is (-1) for previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__("correct_to_mask", input_index=input_index, args=args)
Conf.log.debug("HERE")
super().__init__("correct_to_mask", input_index=input_index)


class MaskrefToMaskdet(MaskImageTransformGAN):
"""Maskref -> Maskdet [GAN]."""

def __init__(self, input_index=(-1,), args=None):
def __init__(self, input_index=(-1,)):
"""
Maskref To Maskdet constructor.

:param input_index: <tuple> index where to take the inputs (default is (-1) for previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__("maskref_to_maskdet", input_index=input_index, args=args)
super().__init__("maskref_to_maskdet", input_index=input_index)


class MaskfinToNude(MaskImageTransformGAN):
"""Maskfin -> Nude [GAN]."""

def __init__(self, input_index=(-1,), args=None):
def __init__(self, input_index=(-1,)):
"""
Maskfin To Nude constructor.

:param input_index: <tuple> index where to take the inputs (default is (-1) for previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__("maskfin_to_nude", input_index=input_index, args=args)
super().__init__("maskfin_to_nude", input_index=input_index)

+ 2
- 2
transform/opencv/__init__.py View File

@@ -5,11 +5,11 @@ from transform import ImageTransform
class ImageTransformOpenCV(ImageTransform):
"""OPENCV Image Transform class."""

def __init__(self, input_index=(-1,), args=None,):
def __init__(self, input_index=(-1,)):
"""
Image Transform OpenCV Constructor.

:param input_index: <tuple> index where to take the inputs (default is (-1) for previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__(args=args, input_index=input_index)
super().__init__(input_index=input_index)

+ 2
- 2
transform/opencv/correct.py View File

@@ -100,7 +100,7 @@ class DressToCorrect(ImageTransformOpenCV):
class ColorTransfer(ImageTransformOpenCV):
"""ColorTransfer [OPENCV]."""

def __init__(self, input_index=(0, -1), args=None):
def __init__(self, input_index=(0, -1)):
"""
Color Transfer constructor.

@@ -108,7 +108,7 @@ class ColorTransfer(ImageTransformOpenCV):
for first and previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__(input_index=input_index, args=args)
super().__init__(input_index=input_index)

def _execute(self, *args):
"""

+ 6
- 4
transform/opencv/mask.py View File

@@ -10,7 +10,7 @@ from transform.opencv.bodypart.extract import extract_annotations
class MaskImageTransformOpenCV(ImageTransformOpenCV):
"""Mask Image Transform OpenCV."""

def __init__(self, input_index=(-2, -1), args=None):
def __init__(self, input_index=(-2, -1)):
"""
Mask Image Transform OpenCV constructor.

@@ -18,7 +18,7 @@ class MaskImageTransformOpenCV(ImageTransformOpenCV):
for the two previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__(args=args, input_index=input_index)
super().__init__(input_index=input_index)


class MaskToMaskref(MaskImageTransformOpenCV):
@@ -61,7 +61,7 @@ class MaskToMaskref(MaskImageTransformOpenCV):
class MaskdetToMaskfin(MaskImageTransformOpenCV):
"""Maskdet -> Maskfin [OPENCV]."""

def __init__(self, input_index=(-2, -1), args=None,):
def __init__(self, input_index=(-2, -1)):
"""
Maskdet To Maskfin constructor.

@@ -69,7 +69,9 @@ class MaskdetToMaskfin(MaskImageTransformOpenCV):
for the two previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__(input_index=input_index, args=args)
super().__init__(input_index=input_index)

def _setup(self, *args):
self.__aur_size = self._args["prefs"]["aursize"]
self.__nip_size = self._args["prefs"]["nipsize"]
self.__tit_size = self._args["prefs"]["titsize"]

+ 4
- 4
transform/opencv/resize.py View File

@@ -10,14 +10,14 @@ from transform.opencv.correct import DressToCorrect
class ImageToCrop(ImageTransformOpenCV):
"""Image -> Crop [OPENCV]."""

def __init__(self, input_index=(-1,), args=None):
def __init__(self, input_index=(-1,)):
"""
Image To Crop Constructor.

:param input_index: <tuple> index where to take the inputs (default is (-1) for previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__(args=args, input_index=input_index)
super().__init__(input_index=input_index)
self.__x1 = self._args['overlay'][0]
self.__y1 = self._args['overlay'][1]
self.__x2 = self._args['overlay'][2]
@@ -40,7 +40,7 @@ class ImageToCrop(ImageTransformOpenCV):
class ImageToOverlay(ImageToCrop):
"""Image -> Overlay [OPENCV]."""

def __init__(self, input_index=(0, -1), args=None):
def __init__(self, input_index=(0, -1)):
"""
Image To Crop Overlay.

@@ -48,7 +48,7 @@ class ImageToOverlay(ImageToCrop):
and previous transformation)
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
"""
super().__init__(input_index=input_index, args=args, )
super().__init__(input_index=input_index)

def _execute(self, *args):
"""

+ 2
- 2
transform/opencv/watermark.py View File

@@ -8,7 +8,7 @@ from transform.opencv import ImageTransformOpenCV
class ImageToWatermark(ImageTransformOpenCV):
"""Image -> Watermarked Image [OPENCV]."""

def __init__(self, input_index=(-1,), args=None, watermark="fake.png"):
def __init__(self, input_index=(-1,), watermark="fake.png"):
"""
Image To Watermark constructor.

@@ -16,7 +16,7 @@ class ImageToWatermark(ImageTransformOpenCV):
:param args: <dict> args parameter to run the image transformation (default use Conf.args)
:param watermark: <string> path to the watermark image
"""
super().__init__(args=args, input_index=input_index)
super().__init__(input_index=input_index)
self.__watermark = cv2.imread(watermark, cv2.IMREAD_UNCHANGED)

def _execute(self, *args):

Loading…
Cancel
Save