From 6bc97ef017cf2ff95b1d9e8a347e0e7fca16fcb4 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Wed, 20 Jan 2021 22:52:16 +0000 Subject: [PATCH 001/110] Face and features detection using MTCNN (Tensorflow & Keras) --- .../classifiers/face_detection/__init__.py | 1 + photonix/classifiers/face_detection/model.py | 81 +++ .../face_detection/mtcnn/__init__.py | 30 ++ .../mtcnn/exceptions/__init__.py | 26 + .../mtcnn/exceptions/invalid_image.py | 30 ++ .../face_detection/mtcnn/layer_factory.py | 227 ++++++++ .../classifiers/face_detection/mtcnn/mtcnn.py | 500 ++++++++++++++++++ .../face_detection/mtcnn/network.py | 111 ++++ .../face_detection/mtcnn/network/__init__.py | 24 + .../face_detection/mtcnn/network/factory.py | 131 +++++ photonix/classifiers/object/model.py | 7 +- photonix/classifiers/runners.py | 9 + photonix/classifiers/style/model.py | 2 +- ...classification_face_detection_processor.py | 22 + photonix/photos/schema.py | 4 + photonix/photos/utils/classification.py | 5 +- photonix/photos/utils/tasks.py | 6 +- system/supervisord.conf | 31 +- ui/src/components/BoundingBoxes.js | 40 +- ui/src/components/PhotoDetail.js | 187 ++++--- ui/src/containers/FiltersContainer.js | 75 ++- ui/src/containers/PhotoDetailContainer.js | 34 +- ui/src/static/css/BoundingBoxes.css | 16 +- 23 files changed, 1455 insertions(+), 144 deletions(-) create mode 100644 photonix/classifiers/face_detection/__init__.py create mode 100644 photonix/classifiers/face_detection/model.py create mode 100644 photonix/classifiers/face_detection/mtcnn/__init__.py create mode 100644 photonix/classifiers/face_detection/mtcnn/exceptions/__init__.py create mode 100755 photonix/classifiers/face_detection/mtcnn/exceptions/invalid_image.py create mode 100644 photonix/classifiers/face_detection/mtcnn/layer_factory.py create mode 100644 photonix/classifiers/face_detection/mtcnn/mtcnn.py create mode 100644 photonix/classifiers/face_detection/mtcnn/network.py create mode 100755 photonix/classifiers/face_detection/mtcnn/network/__init__.py create mode 100755 photonix/classifiers/face_detection/mtcnn/network/factory.py create mode 100644 photonix/photos/management/commands/classification_face_detection_processor.py diff --git a/photonix/classifiers/face_detection/__init__.py b/photonix/classifiers/face_detection/__init__.py new file mode 100644 index 00000000..fbc53f82 --- /dev/null +++ b/photonix/classifiers/face_detection/__init__.py @@ -0,0 +1 @@ +from .model import FaceDetectionModel, run_on_photo diff --git a/photonix/classifiers/face_detection/model.py b/photonix/classifiers/face_detection/model.py new file mode 100644 index 00000000..8e81aca3 --- /dev/null +++ b/photonix/classifiers/face_detection/model.py @@ -0,0 +1,81 @@ +import operator +import os +import sys +from pathlib import Path + +import numpy as np +from PIL import Image +import redis +from redis_lock import Lock + +from photonix.classifiers.base_model import BaseModel +from .mtcnn import MTCNN + + +GRAPH_FILE = os.path.join('face_detection', 'mtcnn_weights.npy') + +class FaceDetectionModel(BaseModel): + name = 'face_detection' + version = 20210120 + approx_ram_mb = 1000 + max_num_workers = 2 + + def __init__(self, model_dir=None, graph_file=GRAPH_FILE, lock_name=None): + super().__init__(model_dir=model_dir) + + graph_file = os.path.join(self.model_dir, graph_file) + + if self.ensure_downloaded(lock_name=lock_name): + self.graph = self.load_graph(graph_file) + + def load_graph(self, graph_file): + r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1')) + with 
Lock(r, 'classifier_{}_load_graph'.format(self.name)):
+            if self.graph_cache_key in self.graph_cache:
+                return self.graph_cache[self.graph_cache_key]
+
+            graph = MTCNN(weights_file=graph_file)
+
+            self.graph_cache[self.graph_cache_key] = graph
+            return graph
+
+    def predict(self, image_file, min_score=0.99):
+        image = Image.open(image_file)
+        image = np.asarray(image)
+        results = self.graph.detect_faces(image)
+        return list(filter(lambda f: f['confidence'] > min_score, results))
+
+
+def run_on_photo(photo_id):
+    model = FaceDetectionModel()
+    sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
+    from photonix.classifiers.runners import results_for_model_on_photo, get_or_create_tag
+    photo, results = results_for_model_on_photo(model, photo_id)
+
+    if photo:
+        from django.utils import timezone
+        from photonix.photos.models import PhotoTag
+        photo.clear_tags(source='C', type='F')
+        for result in results:
+            tag = get_or_create_tag(library=photo.library, name='Unknown face', type='F', source='C')
+            x = (result['box'][0] + (result['box'][2] / 2)) / photo.base_file.width
+            y = (result['box'][1] + (result['box'][3] / 2)) / photo.base_file.height
+            width = result['box'][2] / photo.base_file.width
+            height = result['box'][3] / photo.base_file.height
+            score = result['confidence']
+            PhotoTag(photo=photo, tag=tag, source='C', confidence=score, significance=score, position_x=x, position_y=y, size_x=width, size_y=height).save()
+        photo.classifier_face_detection_completed_at = timezone.now()
+        photo.classifier_face_detection_version = getattr(model, 'version', 0)
+        photo.save()
+
+    return photo, results
+
+
+if __name__ == '__main__':
+    if len(sys.argv) != 2:
+        print('Argument required: image file path or Photo ID')
+        exit(1)
+
+    _, results = run_on_photo(sys.argv[1])
+    print(results)
diff --git a/photonix/classifiers/face_detection/mtcnn/__init__.py b/photonix/classifiers/face_detection/mtcnn/__init__.py
new file mode 100644
index 00000000..04f627bf
--- /dev/null
+++ b/photonix/classifiers/face_detection/mtcnn/__init__.py
@@ -0,0 +1,30 @@
+#!/usr/bin/python3
+# -*- coding: utf-8 -*-
+
+# MIT License
+#
+# Copyright (c) 2019 Iván de Paz Centeno
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
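The tag geometry in `run_on_photo()` above stores each detected face as a centre point plus a size, both normalised to the 0-1 range against the photo's base file dimensions. A minimal standalone sketch of that mapping; the function name and the `image_width`/`image_height` parameters are illustrative, standing in for `photo.base_file.width`/`.height`:

```python
# Sketch: map an MTCNN pixel-space box [x, y, width, height] to the
# normalised, centre-based fields stored on PhotoTag in run_on_photo().
def normalise_box(box, image_width, image_height):
    x_px, y_px, w_px, h_px = box  # top-left corner plus size, in pixels
    return {
        'position_x': (x_px + w_px / 2) / image_width,   # box centre, 0-1
        'position_y': (y_px + h_px / 2) / image_height,  # box centre, 0-1
        'size_x': w_px / image_width,
        'size_y': h_px / image_height,
    }


# A 100x200 px face at (50, 80) in a 1000x1000 px photo:
print(normalise_box([50, 80, 100, 200], 1000, 1000))
# -> {'position_x': 0.1, 'position_y': 0.18, 'size_x': 0.1, 'size_y': 0.2}
```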
+ +from .mtcnn import MTCNN + + +__author__ = "Iván de Paz Centeno" +__version__= "0.1.0" diff --git a/photonix/classifiers/face_detection/mtcnn/exceptions/__init__.py b/photonix/classifiers/face_detection/mtcnn/exceptions/__init__.py new file mode 100644 index 00000000..dceae345 --- /dev/null +++ b/photonix/classifiers/face_detection/mtcnn/exceptions/__init__.py @@ -0,0 +1,26 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# MIT License +# +# Copyright (c) 2019 Iván de Paz Centeno +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from .invalid_image import InvalidImage diff --git a/photonix/classifiers/face_detection/mtcnn/exceptions/invalid_image.py b/photonix/classifiers/face_detection/mtcnn/exceptions/invalid_image.py new file mode 100755 index 00000000..fbb558ef --- /dev/null +++ b/photonix/classifiers/face_detection/mtcnn/exceptions/invalid_image.py @@ -0,0 +1,30 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# MIT License +# +# Copyright (c) 2019 Iván de Paz Centeno +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
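For orientation, everything the rest of the patch consumes from this vendored package goes through the single `MTCNN.detect_faces()` entry point exported above. A hedged usage sketch: the weights path and image file are placeholders, and the output format (a pixel-space `box`, a `confidence` score and five named `keypoints`) is taken from `mtcnn.py` below.

```python
import numpy as np
from PIL import Image

from photonix.classifiers.face_detection.mtcnn import MTCNN

# Placeholder paths; FaceDetectionModel normally supplies the weights file.
detector = MTCNN(weights_file='/path/to/mtcnn_weights.npy')
image = np.asarray(Image.open('example.jpg'))

for face in detector.detect_faces(image):
    x, y, w, h = face['box']  # pixel-space bounding box
    print(w, h, face['confidence'], face['keypoints']['left_eye'])
```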
+ + +__author__ = "Iván de Paz Centeno" + +class InvalidImage(Exception): + pass diff --git a/photonix/classifiers/face_detection/mtcnn/layer_factory.py b/photonix/classifiers/face_detection/mtcnn/layer_factory.py new file mode 100644 index 00000000..89c39d59 --- /dev/null +++ b/photonix/classifiers/face_detection/mtcnn/layer_factory.py @@ -0,0 +1,227 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +#MIT License +# +#Copyright (c) 2018 Iván de Paz Centeno +# +#Permission is hereby granted, free of charge, to any person obtaining a copy +#of this software and associated documentation files (the "Software"), to deal +#in the Software without restriction, including without limitation the rights +#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +#copies of the Software, and to permit persons to whom the Software is +#furnished to do so, subject to the following conditions: +# +#The above copyright notice and this permission notice shall be included in all +#copies or substantial portions of the Software. +# +#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +#SOFTWARE. + +import tensorflow as tf +from distutils.version import LooseVersion + +__author__ = "Iván de Paz Centeno" + + +class LayerFactory(object): + """ + Allows to create stack layers for a given network. + """ + + AVAILABLE_PADDINGS = ('SAME', 'VALID') + + def __init__(self, network): + self.__network = network + + @staticmethod + def __validate_padding(padding): + if padding not in LayerFactory.AVAILABLE_PADDINGS: + raise Exception("Padding {} not valid".format(padding)) + + @staticmethod + def __validate_grouping(channels_input: int, channels_output: int, group: int): + if channels_input % group != 0: + raise Exception("The number of channels in the input does not match the group") + + if channels_output % group != 0: + raise Exception("The number of channels in the output does not match the group") + + @staticmethod + def vectorize_input(input_layer): + input_shape = input_layer.get_shape() + + if input_shape.ndims == 4: + # Spatial input, must be vectorized. + dim = 1 + for x in input_shape[1:].as_list(): + dim *= int(x) + + #dim = operator.mul(*(input_shape[1:].as_list())) + vectorized_input = tf.reshape(input_layer, [-1, dim]) + else: + vectorized_input, dim = (input_layer, input_shape[-1]) + + return vectorized_input, dim + + def __make_var(self, name: str, shape: list): + """ + Creates a tensorflow variable with the given name and shape. + :param name: name to set for the variable. + :param shape: list defining the shape of the variable. + :return: created TF variable. + """ + return tf.compat.v1.get_variable(name, shape, trainable=self.__network.is_trainable(), + use_resource=False) + + def new_feed(self, name: str, layer_shape: tuple): + """ + Creates a feed layer. This is usually the first layer in the network. 
+        :param name: name of the layer
+        :param layer_shape: tuple defining the shape of the input placeholder
+        :return:
+        """
+
+        feed_data = tf.compat.v1.placeholder(tf.float32, layer_shape, 'input')
+        self.__network.add_layer(name, layer_output=feed_data)
+
+    def new_conv(self, name: str, kernel_size: tuple, channels_output: int,
+                 stride_size: tuple, padding: str='SAME',
+                 group: int=1, biased: bool=True, relu: bool=True, input_layer_name: str=None):
+        """
+        Creates a convolution layer for the network.
+        :param name: name for the layer
+        :param kernel_size: tuple containing the size of the kernel (Width, Height)
+        :param channels_output: number of channels in the output; also used as the size of the bias.
+        :param stride_size: tuple containing the size of the stride (Width, Height)
+        :param padding: Type of padding. Available values are: ('SAME', 'VALID')
+        :param group: number of groups for a grouped convolution; the input and output channel
+            counts must both be divisible by it.
+        :param biased: boolean flag to set whether a bias is added.
+        :param relu: boolean flag to set whether ReLU is applied at the end of the layer.
+        :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of
+        the network.
+        """
+
+        # Verify that the padding is acceptable
+        self.__validate_padding(padding)
+
+        input_layer = self.__network.get_layer(input_layer_name)
+
+        # Get the number of channels in the input
+        channels_input = int(input_layer.get_shape()[-1])
+
+        # Verify that the grouping parameter is valid
+        self.__validate_grouping(channels_input, channels_output, group)
+
+        # Convolution for a given input and kernel
+        convolve = lambda input_val, kernel: tf.nn.conv2d(input=input_val,
+                                                          filters=kernel,
+                                                          strides=[1, stride_size[1], stride_size[0], 1],
+                                                          padding=padding)
+
+        with tf.compat.v1.variable_scope(name) as scope:
+            kernel = self.__make_var('weights', shape=[kernel_size[1], kernel_size[0], channels_input // group, channels_output])
+
+            output = convolve(input_layer, kernel)
+
+            # Add the biases, if required
+            if biased:
+                biases = self.__make_var('biases', [channels_output])
+                output = tf.nn.bias_add(output, biases)
+
+            # Apply ReLU non-linearity, if required
+            if relu:
+                output = tf.nn.relu(output, name=scope.name)
+
+        self.__network.add_layer(name, layer_output=output)
+
+    def new_prelu(self, name: str, input_layer_name: str=None):
+        """
+        Creates a new prelu layer with the given name and input.
+        :param name: name for this layer.
+        :param input_layer_name: name of the layer that serves as input for this one.
+        """
+        input_layer = self.__network.get_layer(input_layer_name)
+
+        with tf.compat.v1.variable_scope(name):
+            channels_input = int(input_layer.get_shape()[-1])
+            alpha = self.__make_var('alpha', shape=[channels_input])
+            output = tf.nn.relu(input_layer) + tf.multiply(alpha, -tf.nn.relu(-input_layer))
+
+        self.__network.add_layer(name, layer_output=output)
+
+    def new_max_pool(self, name: str, kernel_size: tuple, stride_size: tuple, padding='SAME',
+                     input_layer_name: str=None):
+        """
+        Creates a new max pooling layer.
+        :param name: name for the layer.
+        :param kernel_size: tuple containing the size of the kernel (Width, Height)
+        :param stride_size: tuple containing the size of the stride (Width, Height)
+        :param padding: Type of padding. Available values are: ('SAME', 'VALID')
+        :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of
+            the network.
+ """ + + self.__validate_padding(padding) + + input_layer = self.__network.get_layer(input_layer_name) + + output = tf.nn.max_pool2d(input=input_layer, + ksize=[1, kernel_size[1], kernel_size[0], 1], + strides=[1, stride_size[1], stride_size[0], 1], + padding=padding, + name=name) + + self.__network.add_layer(name, layer_output=output) + + def new_fully_connected(self, name: str, output_count: int, relu=True, input_layer_name: str=None): + """ + Creates a new fully connected layer. + + :param name: name for the layer. + :param output_count: number of outputs of the fully connected layer. + :param relu: boolean flag to set if ReLu should be applied at the end of this layer. + :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of + the network. + """ + + with tf.compat.v1.variable_scope(name): + input_layer = self.__network.get_layer(input_layer_name) + vectorized_input, dimension = self.vectorize_input(input_layer) + + weights = self.__make_var('weights', shape=[dimension, output_count]) + biases = self.__make_var('biases', shape=[output_count]) + operation = tf.compat.v1.nn.relu_layer if relu else tf.compat.v1.nn.xw_plus_b + + fc = operation(vectorized_input, weights, biases, name=name) + + self.__network.add_layer(name, layer_output=fc) + + def new_softmax(self, name, axis, input_layer_name: str=None): + """ + Creates a new softmax layer + :param name: name to set for the layer + :param axis: + :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of + the network. + """ + input_layer = self.__network.get_layer(input_layer_name) + + if LooseVersion(tf.__version__) < LooseVersion("1.5.0"): + max_axis = tf.reduce_max(input_tensor=input_layer, axis=axis, keepdims=True) + target_exp = tf.exp(input_layer - max_axis) + normalize = tf.reduce_sum(input_tensor=target_exp, axis=axis, keepdims=True) + else: + max_axis = tf.reduce_max(input_tensor=input_layer, axis=axis, keepdims=True) + target_exp = tf.exp(input_layer - max_axis) + normalize = tf.reduce_sum(input_tensor=target_exp, axis=axis, keepdims=True) + + softmax = tf.math.divide(target_exp, normalize, name) + + self.__network.add_layer(name, layer_output=softmax) + diff --git a/photonix/classifiers/face_detection/mtcnn/mtcnn.py b/photonix/classifiers/face_detection/mtcnn/mtcnn.py new file mode 100644 index 00000000..6702e264 --- /dev/null +++ b/photonix/classifiers/face_detection/mtcnn/mtcnn.py @@ -0,0 +1,500 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# MIT License +# +# Copyright (c) 2019 Iván de Paz Centeno +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# +# This code is derived from the MTCNN implementation of David Sandberg for Facenet +# (https://github.com/davidsandberg/facenet/) +# It has been rebuilt from scratch, taking the David Sandberg's implementation as a reference. +# + +# import cv2 +import numpy as np +from PIL import Image +import pkg_resources + +from .exceptions import InvalidImage +from .network.factory import NetworkFactory + +__author__ = "Iván de Paz Centeno" + + +class StageStatus(object): + """ + Keeps status between MTCNN stages + """ + + def __init__(self, pad_result: tuple = None, width=0, height=0): + self.width = width + self.height = height + self.dy = self.edy = self.dx = self.edx = self.y = self.ey = self.x = self.ex = self.tmpw = self.tmph = [] + + if pad_result is not None: + self.update(pad_result) + + def update(self, pad_result: tuple): + s = self + s.dy, s.edy, s.dx, s.edx, s.y, s.ey, s.x, s.ex, s.tmpw, s.tmph = pad_result + + +class MTCNN(object): + """ + Allows to perform MTCNN Detection -> + a) Detection of faces (with the confidence probability) + b) Detection of keypoints (left eye, right eye, nose, mouth_left, mouth_right) + """ + + def __init__(self, weights_file: str = None, min_face_size: int = 20, steps_threshold: list = None, + scale_factor: float = 0.709): + """ + Initializes the MTCNN. + :param weights_file: file uri with the weights of the P, R and O networks from MTCNN. By default it will load + the ones bundled with the package. + :param min_face_size: minimum size of the face to detect + :param steps_threshold: step's thresholds values + :param scale_factor: scale factor + """ + if steps_threshold is None: + steps_threshold = [0.6, 0.7, 0.7] + + if weights_file is None: + weights_file = pkg_resources.resource_stream('mtcnn', 'data/mtcnn_weights.npy') + + self._min_face_size = min_face_size + self._steps_threshold = steps_threshold + self._scale_factor = scale_factor + + self._pnet, self._rnet, self._onet = NetworkFactory().build_P_R_O_nets_from_file(weights_file) + + @property + def min_face_size(self): + return self._min_face_size + + @min_face_size.setter + def min_face_size(self, mfc=20): + try: + self._min_face_size = int(mfc) + except ValueError: + self._min_face_size = 20 + + def __compute_scale_pyramid(self, m, min_layer): + scales = [] + factor_count = 0 + + while min_layer >= 12: + scales += [m * np.power(self._scale_factor, factor_count)] + min_layer = min_layer * self._scale_factor + factor_count += 1 + + return scales + + @staticmethod + def __scale_image(image, scale: float): + """ + Scales the image to a given scale. 
+        :param image:
+        :param scale:
+        :return:
+        """
+        height, width, _ = image.shape
+
+        width_scaled = int(np.ceil(width * scale))
+        height_scaled = int(np.ceil(height * scale))
+
+        # im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation = cv2.INTER_AREA)
+        im_data = Image.fromarray(image).resize((width_scaled, height_scaled), Image.BICUBIC)
+        im_data = np.asarray(im_data)
+
+        # Normalize the image's pixels
+        im_data_normalized = (im_data - 127.5) * 0.0078125
+
+        return im_data_normalized
+
+    @staticmethod
+    def __generate_bounding_box(imap, reg, scale, t):
+
+        # use heatmap to generate bounding boxes
+        stride = 2
+        cellsize = 12
+
+        imap = np.transpose(imap)
+        dx1 = np.transpose(reg[:, :, 0])
+        dy1 = np.transpose(reg[:, :, 1])
+        dx2 = np.transpose(reg[:, :, 2])
+        dy2 = np.transpose(reg[:, :, 3])
+
+        y, x = np.where(imap >= t)
+
+        if y.shape[0] == 1:
+            dx1 = np.flipud(dx1)
+            dy1 = np.flipud(dy1)
+            dx2 = np.flipud(dx2)
+            dy2 = np.flipud(dy2)
+
+        score = imap[(y, x)]
+        reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]]))
+
+        if reg.size == 0:
+            reg = np.empty(shape=(0, 3))
+
+        bb = np.transpose(np.vstack([y, x]))
+
+        q1 = np.fix((stride * bb + 1) / scale)
+        q2 = np.fix((stride * bb + cellsize) / scale)
+        boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg])
+
+        return boundingbox, reg
+
+    @staticmethod
+    def __nms(boxes, threshold, method):
+        """
+        Non Maximum Suppression.
+
+        :param boxes: np array with bounding boxes.
+        :param threshold:
+        :param method: NMS method to apply. Available values ('Min', 'Union')
+        :return:
+        """
+        if boxes.size == 0:
+            return np.empty((0, 3))
+
+        x1 = boxes[:, 0]
+        y1 = boxes[:, 1]
+        x2 = boxes[:, 2]
+        y2 = boxes[:, 3]
+        s = boxes[:, 4]
+
+        area = (x2 - x1 + 1) * (y2 - y1 + 1)
+        sorted_s = np.argsort(s)
+
+        pick = np.zeros_like(s, dtype=np.int16)
+        counter = 0
+        while sorted_s.size > 0:
+            i = sorted_s[-1]
+            pick[counter] = i
+            counter += 1
+            idx = sorted_s[0:-1]
+
+            xx1 = np.maximum(x1[i], x1[idx])
+            yy1 = np.maximum(y1[i], y1[idx])
+            xx2 = np.minimum(x2[i], x2[idx])
+            yy2 = np.minimum(y2[i], y2[idx])
+
+            w = np.maximum(0.0, xx2 - xx1 + 1)
+            h = np.maximum(0.0, yy2 - yy1 + 1)
+
+            inter = w * h
+
+            if method == 'Min':
+                o = inter / np.minimum(area[i], area[idx])
+            else:
+                o = inter / (area[i] + area[idx] - inter)
+
+            sorted_s = sorted_s[np.where(o <= threshold)]
+
+        pick = pick[0:counter]
+
+        return pick
+
+    @staticmethod
+    def __pad(total_boxes, w, h):
+        # compute the padding coordinates (pad the bounding boxes to square)
+        tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32)
+        tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32)
+        numbox = total_boxes.shape[0]
+
+        dx = np.ones(numbox, dtype=np.int32)
+        dy = np.ones(numbox, dtype=np.int32)
+        edx = tmpw.copy().astype(np.int32)
+        edy = tmph.copy().astype(np.int32)
+
+        x = total_boxes[:, 0].copy().astype(np.int32)
+        y = total_boxes[:, 1].copy().astype(np.int32)
+        ex = total_boxes[:, 2].copy().astype(np.int32)
+        ey = total_boxes[:, 3].copy().astype(np.int32)
+
+        tmp = np.where(ex > w)
+        edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1)
+        ex[tmp] = w
+
+        tmp = np.where(ey > h)
+        edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1)
+        ey[tmp] = h
+
+        tmp = np.where(x < 1)
+        dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1)
+        x[tmp] = 1
+
+        tmp = np.where(y < 1)
+        dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1)
+        y[tmp] = 1
+
+        return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph
+
+    @staticmethod
+    def __rerec(bbox):
+        # 
convert bbox to square + height = bbox[:, 3] - bbox[:, 1] + width = bbox[:, 2] - bbox[:, 0] + max_side_length = np.maximum(width, height) + bbox[:, 0] = bbox[:, 0] + width * 0.5 - max_side_length * 0.5 + bbox[:, 1] = bbox[:, 1] + height * 0.5 - max_side_length * 0.5 + bbox[:, 2:4] = bbox[:, 0:2] + np.transpose(np.tile(max_side_length, (2, 1))) + return bbox + + @staticmethod + def __bbreg(boundingbox, reg): + # calibrate bounding boxes + if reg.shape[1] == 1: + reg = np.reshape(reg, (reg.shape[2], reg.shape[3])) + + w = boundingbox[:, 2] - boundingbox[:, 0] + 1 + h = boundingbox[:, 3] - boundingbox[:, 1] + 1 + b1 = boundingbox[:, 0] + reg[:, 0] * w + b2 = boundingbox[:, 1] + reg[:, 1] * h + b3 = boundingbox[:, 2] + reg[:, 2] * w + b4 = boundingbox[:, 3] + reg[:, 3] * h + boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4])) + return boundingbox + + def detect_faces(self, img) -> list: + """ + Detects bounding boxes from the specified image. + :param img: image to process + :return: list containing all the bounding boxes detected with their keypoints. + """ + if img is None or not hasattr(img, "shape"): + raise InvalidImage("Image not valid.") + + height, width, _ = img.shape + stage_status = StageStatus(width=width, height=height) + + m = 12 / self._min_face_size + min_layer = np.amin([height, width]) * m + + scales = self.__compute_scale_pyramid(m, min_layer) + + stages = [self.__stage1, self.__stage2, self.__stage3] + result = [scales, stage_status] + + # We pipe here each of the stages + for stage in stages: + result = stage(img, result[0], result[1]) + + [total_boxes, points] = result + + bounding_boxes = [] + + for bounding_box, keypoints in zip(total_boxes, points.T): + x = max(0, int(bounding_box[0])) + y = max(0, int(bounding_box[1])) + width = int(bounding_box[2] - x) + height = int(bounding_box[3] - y) + bounding_boxes.append({ + 'box': [x, y, width, height], + 'confidence': bounding_box[-1], + 'keypoints': { + 'left_eye': (int(keypoints[0]), int(keypoints[5])), + 'right_eye': (int(keypoints[1]), int(keypoints[6])), + 'nose': (int(keypoints[2]), int(keypoints[7])), + 'mouth_left': (int(keypoints[3]), int(keypoints[8])), + 'mouth_right': (int(keypoints[4]), int(keypoints[9])), + } + }) + + return bounding_boxes + + def __stage1(self, image, scales: list, stage_status: StageStatus): + """ + First stage of the MTCNN. 
+ :param image: + :param scales: + :param stage_status: + :return: + """ + total_boxes = np.empty((0, 9)) + status = stage_status + + for scale in scales: + scaled_image = self.__scale_image(image, scale) + + img_x = np.expand_dims(scaled_image, 0) + img_y = np.transpose(img_x, (0, 2, 1, 3)) + + out = self._pnet.predict(img_y) + + out0 = np.transpose(out[0], (0, 2, 1, 3)) + out1 = np.transpose(out[1], (0, 2, 1, 3)) + + boxes, _ = self.__generate_bounding_box(out1[0, :, :, 1].copy(), + out0[0, :, :, :].copy(), scale, self._steps_threshold[0]) + + # inter-scale nms + pick = self.__nms(boxes.copy(), 0.5, 'Union') + if boxes.size > 0 and pick.size > 0: + boxes = boxes[pick, :] + total_boxes = np.append(total_boxes, boxes, axis=0) + + numboxes = total_boxes.shape[0] + + if numboxes > 0: + pick = self.__nms(total_boxes.copy(), 0.7, 'Union') + total_boxes = total_boxes[pick, :] + + regw = total_boxes[:, 2] - total_boxes[:, 0] + regh = total_boxes[:, 3] - total_boxes[:, 1] + + qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw + qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh + qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw + qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh + + total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]])) + total_boxes = self.__rerec(total_boxes.copy()) + + total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32) + status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), + width=stage_status.width, height=stage_status.height) + + return total_boxes, status + + def __stage2(self, img, total_boxes, stage_status: StageStatus): + """ + Second stage of the MTCNN. + :param img: + :param total_boxes: + :param stage_status: + :return: + """ + + num_boxes = total_boxes.shape[0] + if num_boxes == 0: + return total_boxes, stage_status + + # second stage + tempimg = np.zeros(shape=(24, 24, 3, num_boxes)) + + for k in range(0, num_boxes): + tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3)) + + tmp[stage_status.dy[k] - 1:stage_status.edy[k], stage_status.dx[k] - 1:stage_status.edx[k], :] = \ + img[stage_status.y[k] - 1:stage_status.ey[k], stage_status.x[k] - 1:stage_status.ex[k], :] + + if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: + # tempimg[:,:,:, k] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA) + tempimg[:, :, :, k] = np.asarray(Image.fromarray(np.uint8(tmp)).resize((24, 24), Image.BICUBIC)) + + else: + return np.empty(shape=(0,)), stage_status + + tempimg = (tempimg - 127.5) * 0.0078125 + tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) + + out = self._rnet.predict(tempimg1) + + out0 = np.transpose(out[0]) + out1 = np.transpose(out[1]) + + score = out1[1, :] + + ipass = np.where(score > self._steps_threshold[1]) + + total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) + + mv = out0[:, ipass[0]] + + if total_boxes.shape[0] > 0: + pick = self.__nms(total_boxes, 0.7, 'Union') + total_boxes = total_boxes[pick, :] + total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[:, pick])) + total_boxes = self.__rerec(total_boxes.copy()) + + return total_boxes, stage_status + + def __stage3(self, img, total_boxes, stage_status: StageStatus): + """ + Third stage of the MTCNN. 
+ + :param img: + :param total_boxes: + :param stage_status: + :return: + """ + num_boxes = total_boxes.shape[0] + if num_boxes == 0: + return total_boxes, np.empty(shape=(0,)) + + total_boxes = np.fix(total_boxes).astype(np.int32) + + status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), + width=stage_status.width, height=stage_status.height) + + tempimg = np.zeros((48, 48, 3, num_boxes)) + + for k in range(0, num_boxes): + + tmp = np.zeros((int(status.tmph[k]), int(status.tmpw[k]), 3)) + + tmp[status.dy[k] - 1:status.edy[k], status.dx[k] - 1:status.edx[k], :] = \ + img[status.y[k] - 1:status.ey[k], status.x[k] - 1:status.ex[k], :] + + if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: + # tempimg[:,:,:, k] = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_AREA) + tempimg[:, :, :, k] = np.asarray(Image.fromarray(np.uint8(tmp)).resize((48, 48), Image.BICUBIC)) + else: + return np.empty(shape=(0,)), np.empty(shape=(0,)) + + tempimg = (tempimg - 127.5) * 0.0078125 + tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) + + out = self._onet.predict(tempimg1) + out0 = np.transpose(out[0]) + out1 = np.transpose(out[1]) + out2 = np.transpose(out[2]) + + score = out2[1, :] + + points = out1 + + ipass = np.where(score > self._steps_threshold[2]) + + points = points[:, ipass[0]] + + total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) + + mv = out0[:, ipass[0]] + + w = total_boxes[:, 2] - total_boxes[:, 0] + 1 + h = total_boxes[:, 3] - total_boxes[:, 1] + 1 + + points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1 + points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1 + + if total_boxes.shape[0] > 0: + total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv)) + pick = self.__nms(total_boxes.copy(), 0.7, 'Min') + total_boxes = total_boxes[pick, :] + points = points[:, pick] + + return total_boxes, points diff --git a/photonix/classifiers/face_detection/mtcnn/network.py b/photonix/classifiers/face_detection/mtcnn/network.py new file mode 100644 index 00000000..7c5f3148 --- /dev/null +++ b/photonix/classifiers/face_detection/mtcnn/network.py @@ -0,0 +1,111 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +#MIT License +# +#Copyright (c) 2018 Iván de Paz Centeno +# +#Permission is hereby granted, free of charge, to any person obtaining a copy +#of this software and associated documentation files (the "Software"), to deal +#in the Software without restriction, including without limitation the rights +#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +#copies of the Software, and to permit persons to whom the Software is +#furnished to do so, subject to the following conditions: +# +#The above copyright notice and this permission notice shall be included in all +#copies or substantial portions of the Software. +# +#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +#SOFTWARE. 
+ +import tensorflow as tf + +__author__ = "Iván de Paz Centeno" + + +class Network(object): + + def __init__(self, session, trainable: bool=True): + """ + Initializes the network. + :param trainable: flag to determine if this network should be trainable or not. + """ + self._session = session + self.__trainable = trainable + self.__layers = {} + self.__last_layer_name = None + + with tf.compat.v1.variable_scope(self.__class__.__name__.lower()): + self._config() + + def _config(self): + """ + Configures the network layers. + It is usually done using the LayerFactory() class. + """ + raise NotImplementedError("This method must be implemented by the network.") + + def add_layer(self, name: str, layer_output): + """ + Adds a layer to the network. + :param name: name of the layer to add + :param layer_output: output layer. + """ + self.__layers[name] = layer_output + self.__last_layer_name = name + + def get_layer(self, name: str=None): + """ + Retrieves the layer by its name. + :param name: name of the layer to retrieve. If name is None, it will retrieve the last added layer to the + network. + :return: layer output + """ + if name is None: + name = self.__last_layer_name + + return self.__layers[name] + + def is_trainable(self): + """ + Getter for the trainable flag. + """ + return self.__trainable + + def set_weights(self, weights_values: dict, ignore_missing=False): + """ + Sets the weights values of the network. + :param weights_values: dictionary with weights for each layer + """ + network_name = self.__class__.__name__.lower() + + with tf.compat.v1.variable_scope(network_name): + for layer_name in weights_values: + with tf.compat.v1.variable_scope(layer_name, reuse=True): + for param_name, data in weights_values[layer_name].items(): + try: + var = tf.compat.v1.get_variable(param_name, use_resource=False) + self._session.run(var.assign(data)) + + except ValueError: + if not ignore_missing: + raise + + def feed(self, image): + """ + Feeds the network with an image + :param image: image (perhaps loaded with CV2) + :return: network result + """ + network_name = self.__class__.__name__.lower() + + with tf.compat.v1.variable_scope(network_name): + return self._feed(image) + + def _feed(self, image): + raise NotImplementedError("Method not implemented.") \ No newline at end of file diff --git a/photonix/classifiers/face_detection/mtcnn/network/__init__.py b/photonix/classifiers/face_detection/mtcnn/network/__init__.py new file mode 100755 index 00000000..48d3830c --- /dev/null +++ b/photonix/classifiers/face_detection/mtcnn/network/__init__.py @@ -0,0 +1,24 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# MIT License +# +# Copyright (c) 2019 Iván de Paz Centeno +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. diff --git a/photonix/classifiers/face_detection/mtcnn/network/factory.py b/photonix/classifiers/face_detection/mtcnn/network/factory.py new file mode 100755 index 00000000..27dd4772 --- /dev/null +++ b/photonix/classifiers/face_detection/mtcnn/network/factory.py @@ -0,0 +1,131 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# MIT License +# +# Copyright (c) 2019 Iván de Paz Centeno +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, PReLU, Flatten, Softmax +from tensorflow.keras.models import Model + +import numpy as np + + +class NetworkFactory: + + def build_pnet(self, input_shape=None): + if input_shape is None: + input_shape = (None, None, 3) + + p_inp = Input(input_shape) + + p_layer = Conv2D(10, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_inp) + p_layer = PReLU(shared_axes=[1, 2])(p_layer) + p_layer = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same")(p_layer) + + p_layer = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_layer) + p_layer = PReLU(shared_axes=[1, 2])(p_layer) + + p_layer = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_layer) + p_layer = PReLU(shared_axes=[1, 2])(p_layer) + + p_layer_out1 = Conv2D(2, kernel_size=(1, 1), strides=(1, 1))(p_layer) + p_layer_out1 = Softmax(axis=3)(p_layer_out1) + + p_layer_out2 = Conv2D(4, kernel_size=(1, 1), strides=(1, 1))(p_layer) + + p_net = Model(p_inp, [p_layer_out2, p_layer_out1]) + + return p_net + + def build_rnet(self, input_shape=None): + if input_shape is None: + input_shape = (24, 24, 3) + + r_inp = Input(input_shape) + + r_layer = Conv2D(28, kernel_size=(3, 3), strides=(1, 1), padding="valid")(r_inp) + r_layer = PReLU(shared_axes=[1, 2])(r_layer) + r_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(r_layer) + + r_layer = Conv2D(48, kernel_size=(3, 3), strides=(1, 1), padding="valid")(r_layer) + r_layer = PReLU(shared_axes=[1, 2])(r_layer) + r_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(r_layer) + + r_layer = Conv2D(64, kernel_size=(2, 2), strides=(1, 1), padding="valid")(r_layer) + r_layer = PReLU(shared_axes=[1, 2])(r_layer) + r_layer = Flatten()(r_layer) + r_layer = 
Dense(128)(r_layer) + r_layer = PReLU()(r_layer) + + r_layer_out1 = Dense(2)(r_layer) + r_layer_out1 = Softmax(axis=1)(r_layer_out1) + + r_layer_out2 = Dense(4)(r_layer) + + r_net = Model(r_inp, [r_layer_out2, r_layer_out1]) + + return r_net + + def build_onet(self, input_shape=None): + if input_shape is None: + input_shape = (48, 48, 3) + + o_inp = Input(input_shape) + o_layer = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_inp) + o_layer = PReLU(shared_axes=[1, 2])(o_layer) + o_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(o_layer) + + o_layer = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_layer) + o_layer = PReLU(shared_axes=[1, 2])(o_layer) + o_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(o_layer) + + o_layer = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_layer) + o_layer = PReLU(shared_axes=[1, 2])(o_layer) + o_layer = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same")(o_layer) + + o_layer = Conv2D(128, kernel_size=(2, 2), strides=(1, 1), padding="valid")(o_layer) + o_layer = PReLU(shared_axes=[1, 2])(o_layer) + + o_layer = Flatten()(o_layer) + o_layer = Dense(256)(o_layer) + o_layer = PReLU()(o_layer) + + o_layer_out1 = Dense(2)(o_layer) + o_layer_out1 = Softmax(axis=1)(o_layer_out1) + o_layer_out2 = Dense(4)(o_layer) + o_layer_out3 = Dense(10)(o_layer) + + o_net = Model(o_inp, [o_layer_out2, o_layer_out3, o_layer_out1]) + return o_net + + def build_P_R_O_nets_from_file(self, weights_file): + weights = np.load(weights_file, allow_pickle=True).tolist() + + p_net = self.build_pnet() + r_net = self.build_rnet() + o_net = self.build_onet() + + p_net.set_weights(weights['pnet']) + r_net.set_weights(weights['rnet']) + o_net.set_weights(weights['onet']) + + return p_net, r_net, o_net diff --git a/photonix/classifiers/object/model.py b/photonix/classifiers/object/model.py index af4c179b..70c3f19f 100644 --- a/photonix/classifiers/object/model.py +++ b/photonix/classifiers/object/model.py @@ -136,8 +136,9 @@ def run_on_photo(photo_id): from photonix.photos.models import PhotoTag photo.clear_tags(source='C', type='O') for result in results: - tag = get_or_create_tag(library=photo.library, name=result['label'], type='O', source='C') - PhotoTag(photo=photo, tag=tag, source='C', confidence=result['score'], significance=result['significance'], position_x=result['x'], position_y=result['y'], size_x=result['width'], size_y=result['height']).save() + if result['label'] != 'Human face': # We have a specialised face detector + tag = get_or_create_tag(library=photo.library, name=result['label'], type='O', source='C') + PhotoTag(photo=photo, tag=tag, source='C', confidence=result['score'], significance=result['significance'], position_x=result['x'], position_y=result['y'], size_x=result['width'], size_y=result['height']).save() photo.classifier_object_completed_at = timezone.now() photo.classifier_object_version = getattr(model, 'version', 0) photo.save() @@ -148,7 +149,7 @@ def run_on_photo(photo_id): if __name__ == '__main__': model = ObjectModel() if len(sys.argv) != 2: - print('Argument required: image file path') + print('Argument required: image file path or Photo ID') exit(1) results = run_on_photo(sys.argv[1]) diff --git a/photonix/classifiers/runners.py b/photonix/classifiers/runners.py index 950e0285..38509f30 100644 --- a/photonix/classifiers/runners.py +++ b/photonix/classifiers/runners.py @@ -1,3 +1,4 @@ +import os import re from uuid import UUID @@ -28,12 +29,20 @@ 
def results_for_model_on_photo(photo_id):
     elif hasattr(photo_id, 'id'):
         photo = photo_id
 
     # Is an individual filename so return the prediction
     if not is_photo_instance:
         return None, model.predict(photo_id)
 
     # Is a Photo model instance so needs saving
     if not photo:
+        # Handle running scripts from command line and Photo IDs
+        if not os.environ.get('DJANGO_SETTINGS_MODULE'):
+            os.environ.setdefault("DJANGO_SETTINGS_MODULE", "photonix.web.settings")
+            import django
+            django.setup()
+
         from photonix.photos.models import Photo
         photo = Photo.objects.get(id=photo_id)
 
diff --git a/photonix/classifiers/style/model.py b/photonix/classifiers/style/model.py
index 7aae2e97..5711fb23 100644
--- a/photonix/classifiers/style/model.py
+++ b/photonix/classifiers/style/model.py
@@ -5,8 +5,8 @@
 
 import numpy as np
 import redis
-import tensorflow as tf
 from redis_lock import Lock
+import tensorflow as tf
 
 from photonix.classifiers.base_model import BaseModel
 
diff --git a/photonix/photos/management/commands/classification_face_detection_processor.py b/photonix/photos/management/commands/classification_face_detection_processor.py
new file mode 100644
index 00000000..70384bb8
--- /dev/null
+++ b/photonix/photos/management/commands/classification_face_detection_processor.py
@@ -0,0 +1,22 @@
+from django.core.management.base import BaseCommand
+# Pre-load the model graphs so it doesn't have to be done for each job
+from photonix.classifiers.face_detection import FaceDetectionModel, run_on_photo
+from photonix.photos.models import Task
+from photonix.photos.utils.classification import ThreadedQueueProcessor
+
+
+print('Loading face detection model')
+model = FaceDetectionModel()
+
+
+class Command(BaseCommand):
+    help = 'Runs the workers with the face detection model.'
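+    # The module-level FaceDetectionModel() above loads the MTCNN graph once
+    # per process; run_on_photo() builds its own FaceDetectionModel, which is
+    # expected to come back from BaseModel's graph cache rather than
+    # re-reading the weights for every task.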
+
+    def run_processors(self):
+        num_workers = 4
+        batch_size = 64
+        threaded_queue_processor = ThreadedQueueProcessor(model, 'classify.face_detection', run_on_photo, num_workers, batch_size)
+        threaded_queue_processor.run()
+
+    def handle(self, *args, **options):
+        self.run_processors()
diff --git a/photonix/photos/schema.py b/photonix/photos/schema.py
index e0808041..32e32568 100644
--- a/photonix/photos/schema.py
+++ b/photonix/photos/schema.py
@@ -53,6 +53,7 @@ class PhotoNode(DjangoObjectType):
     location = graphene.String()
     location_tags = graphene.List(PhotoTagType)
     object_tags = graphene.List(PhotoTagType)
+    person_tags = graphene.List(PhotoTagType)
     color_tags = graphene.List(PhotoTagType)
     style_tags = graphene.List(PhotoTagType)
     width = graphene.Int()
@@ -77,6 +78,9 @@ def resolve_location_tags(self, info):
     def resolve_object_tags(self, info):
         return self.photo_tags.filter(tag__type='O')
 
+    def resolve_person_tags(self, info):
+        return self.photo_tags.filter(tag__type='F')
+
     def resolve_color_tags(self, info):
         return self.photo_tags.filter(tag__type='C')
 
diff --git a/photonix/photos/utils/classification.py b/photonix/photos/utils/classification.py
index 308cd89c..c7cf9f1f 100644
--- a/photonix/photos/utils/classification.py
+++ b/photonix/photos/utils/classification.py
@@ -1,6 +1,7 @@
 import queue
 import threading
 from time import sleep
+import traceback
 
 from django.db import transaction
 from django.utils import timezone
@@ -12,6 +13,7 @@
     'location',
     'object',
     'style',
+    'face_detection',
 ]
 
 
@@ -60,7 +62,8 @@ def __process_task(self, task):
             task.start()
             self.runner(task.subject_id)
             task.complete()
-        except:
+        except Exception:
+            traceback.print_exc()
             task.failed()
 
     def __clean_up(self):
diff --git a/photonix/photos/utils/tasks.py b/photonix/photos/utils/tasks.py
index 40a0f47a..66b3f05a 100644
--- a/photonix/photos/utils/tasks.py
+++ b/photonix/photos/utils/tasks.py
@@ -5,11 +5,11 @@
 from photonix.photos.models import Task
 
 
-def requeue_stuck_tasks(task_type, age_hours=24, max_num=8):
+def requeue_stuck_tasks(task_type, age_hours=0.01, max_num=8):
     # Set old, failed jobs to Pending
-    for task in Task.objects.filter(type=task_type, status='S', updated_at__lt=timezone.now() - timedelta(hours=24))[:8]:
+    for task in Task.objects.filter(type=task_type, status='S', updated_at__lt=timezone.now() - timedelta(hours=age_hours))[:max_num]:
         task.status = 'P'
         task.save()
-    for task in Task.objects.filter(type=task_type, status='F', updated_at__lt=timezone.now() - timedelta(hours=24))[:8]:
+    for task in Task.objects.filter(type=task_type, status='F', updated_at__lt=timezone.now() - timedelta(hours=age_hours))[:max_num]:
         task.status = 'P'
         task.save()
diff --git a/system/supervisord.conf b/system/supervisord.conf
index de87ddc0..bde0d7e0 100644
--- a/system/supervisord.conf
+++ b/system/supervisord.conf
@@ -31,7 +31,7 @@ stdout_logfile_maxbytes=0
 
 [program:webpack]
 command=/srv/system/run_webpack_server.sh
-startsecs=10
+startsecs=0
 directory=/srv/ui
 stderr_logfile=/dev/stderr
 stdout_logfile=/dev/stdout
 stderr_logfile_maxbytes=0
 stdout_logfile_maxbytes=0
 
 [program:storybook]
 command=/srv/system/run_storybook.sh
-startsecs=30
+startsecs=0
 directory=/srv/ui
 stderr_logfile=/dev/stderr
 stdout_logfile=/dev/stdout
 stderr_logfile_maxbytes=0
 stdout_logfile_maxbytes=0
 
 [program:watch_photos]
 command=bash -c "nice -n 16 python /srv/photonix/manage.py watch_photos"
-startsecs=12
+startsecs=10
 environment=PYTHONPATH=/srv
 stderr_logfile=/dev/stderr
 stdout_logfile=/dev/stdout
 stderr_logfile_maxbytes=0
 stdout_logfile_maxbytes=0
 
 [program:raw_scheduler]
command=bash -c "sleep 5 && nice -n 17 python /srv/photonix/manage.py raw_scheduler" -startsecs=14 +startsecs=15 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -67,7 +67,7 @@ stdout_logfile_maxbytes=0 [program:raw_processor] command=bash -c "sleep 6 && nice -n 17 python /srv/photonix/manage.py raw_processor" -startsecs=15 +startsecs=16 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -76,7 +76,7 @@ stdout_logfile_maxbytes=0 [program:thumbnail_scheduler] command=bash -c "sleep 7 && nice -n 17 python /srv/photonix/manage.py thumbnail_processor" -startsecs=16 +startsecs=17 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -85,7 +85,7 @@ stdout_logfile_maxbytes=0 [program:classification_scheduler] command=bash -c "sleep 8 && nice -n 18 python /srv/photonix/manage.py classification_scheduler" -startsecs=17 +startsecs=18 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -94,7 +94,7 @@ stdout_logfile_maxbytes=0 [program:classification_color_processor] command=bash -c "sleep 9 && nice -n 19 python /srv/photonix/manage.py classification_color_processor" -startsecs=18 +startsecs=19 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -103,7 +103,7 @@ stdout_logfile_maxbytes=0 [program:classification_location_processor] command=bash -c "sleep 10 && nice -n 19 python /srv/photonix/manage.py classification_location_processor" -startsecs=19 +startsecs=20 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -112,7 +112,7 @@ stdout_logfile_maxbytes=0 [program:classification_style_processor] command=bash -c "sleep 11 && nice -n 19 python /srv/photonix/manage.py classification_style_processor" -startsecs=20 +startsecs=21 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -121,7 +121,16 @@ stdout_logfile_maxbytes=0 [program:classification_object_processor] command=bash -c "sleep 12 && nice -n 19 python /srv/photonix/manage.py classification_object_processor" -startsecs=21 +startsecs=22 +environment=PYTHONPATH=/srv +stderr_logfile=/dev/stderr +stdout_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_logfile_maxbytes=0 + +[program:classification_face_detection_processor] +command=bash -c "sleep 13 && nice -n 19 python /srv/photonix/manage.py classification_face_detection_processor" +startsecs=23 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index 50f94cac..55edeb25 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -2,9 +2,9 @@ import React from 'react' import '../static/css/BoundingBoxes.css' -const BoundingBoxes = ({ photoWidth, photoHeight, boxes }) => { +const BoundingBoxes = ({ photoWidth, photoHeight, boxes, className }) => { let multiplier = window.innerWidth / photoWidth - if ((window.innerHeight / photoHeight) < multiplier) { + if (window.innerHeight / photoHeight < multiplier) { multiplier = window.innerHeight / photoHeight } let displayHeight = photoHeight * multiplier @@ -14,19 +14,31 @@ const BoundingBoxes = ({ photoWidth, photoHeight, boxes }) => { return (
- { - boxes.map((box, index) => { - let width = (box.sizeX * displayWidth) + 'px' - let height = (box.sizeY * displayHeight) + 'px' - let left = offsetLeft + (box.positionX * displayWidth) - (box.sizeX * displayWidth / 2) + 'px' - let top = offsetTop + (box.positionY * displayHeight) - (box.sizeY * displayHeight / 2) + 'px' - return ( -
-
{box.name}
+ {boxes.map((box, index) => { + let width = box.sizeX * displayWidth + 'px' + let height = box.sizeY * displayHeight + 'px' + let left = + offsetLeft + + box.positionX * displayWidth - + (box.sizeX * displayWidth) / 2 + + 'px' + let top = + offsetTop + + box.positionY * displayHeight - + (box.sizeY * displayHeight) / 2 + + 'px' + return ( +
+
+ {box.name}
- ) - }) - } +
+ ) + })}
) } diff --git a/ui/src/components/PhotoDetail.js b/ui/src/components/PhotoDetail.js index 3418285c..e1174f0a 100644 --- a/ui/src/components/PhotoDetail.js +++ b/ui/src/components/PhotoDetail.js @@ -1,4 +1,4 @@ -import React, { useState,useEffect } from 'react' +import React, { useState, useEffect } from 'react' import history from '../history' import BoundingBoxes from './BoundingBoxes' @@ -6,49 +6,55 @@ import MapViewContainer from '../containers/MapViewContainer' import ColorTags from './ColorTags' import HierarchicalTagsContainer from '../containers/HierarchicalTagsContainer' import StarRating from './StarRating' -import {PHOTO_UPDATE} from '../graphql/photo' -import { useMutation} from '@apollo/react-hooks'; +import { PHOTO_UPDATE } from '../graphql/photo' +import { useMutation } from '@apollo/react-hooks' import { ReactComponent as CloseIcon } from '../static/images/close.svg' import { ReactComponent as ArrowDownIcon } from '../static/images/arrow_down.svg' import '../static/css/PhotoDetail.css' - const PhotoDetail = ({ photoId, photo }) => { - const [starRating,updateStarRating] = useState(photo.starRating) + const [starRating, updateStarRating] = useState(photo.starRating) const [updatePhoto] = useMutation(PHOTO_UPDATE) - useEffect (() => { + useEffect(() => { updateStarRating(photo.starRating) - },[photo.starRating]) + }, [photo.starRating]) const onStarClick = (num, e) => { if (starRating === num) { updateStarRating(0) updatePhoto({ variables: { - photoId:photoId, - starRating:0 - } - }).catch(e => {}) - } - else { + photoId: photoId, + starRating: 0, + }, + }).catch((e) => {}) + } else { updateStarRating(num) updatePhoto({ variables: { - photoId:photoId, - starRating:num - } - }).catch(e => {}) - + photoId: photoId, + starRating: num, + }, + }).catch((e) => {}) } } - let boxes = photo.objectTags.map((objectTag) => { + let objectBoxes = photo.objectTags.map((tag) => { + return { + name: tag.tag.name, + positionX: tag.positionX, + positionY: tag.positionY, + sizeX: tag.sizeX, + sizeY: tag.sizeY, + } + }) + let faceBoxes = photo.personTags.map((tag) => { return { - name: objectTag.tag.name, - positionX: objectTag.positionX, - positionY: objectTag.positionY, - sizeX: objectTag.sizeX, - sizeY: objectTag.sizeY, + name: tag.tag.name, + positionX: tag.positionX, + positionY: tag.positionY, + sizeX: tag.sizeX, + sizeY: tag.sizeY, } }) @@ -64,17 +70,33 @@ const PhotoDetail = ({ photoId, photo }) => { date = new Intl.DateTimeFormat().format(date) } return ( -
+
- +

Camera

    - {photo.camera ?
  • {photo.camera.make} {photo.camera.model}
  • : ''} + {photo.camera ? ( +
  • + {photo.camera.make} {photo.camera.model} +
  • + ) : ( + '' + )} {date ?
  • Date: {date}
  • : ''}
  • Aperture: {photo.aperture}
  • Exposure: {photo.exposure}
  • @@ -83,84 +105,103 @@ const PhotoDetail = ({ photoId, photo }) => {
  • Flash: {photo.flash ? 'ON' : 'OFF'}
  • Metering mode: {photo.meteringMode}
  • {photo.driveMode ?
  • Drive mode: {photo.driveMode}
  • : ''} - {photo.shootingMode ?
  • Shooting mode: {photo.shootingMode}
  • : ''} + {photo.shootingMode ? ( +
  • Shooting mode: {photo.shootingMode}
  • + ) : ( + '' + )}
- { - photo.locationTags.length - ? + {photo.locationTags.length ? (

Locations

- { - let newItem = item.tag - newItem.parent = item.parent - return newItem - })} /> + { + let newItem = item.tag + newItem.parent = item.parent + return newItem + })} + />
- : + ) : ( '' - } - { - photo.location - ? + )} + {photo.location ? (

Map

- { - - } + { + + }
- : + ) : ( '' - } - { - photo.colorTags.length - ? + )} + {photo.colorTags.length ? (

Colors

- (item.tag))} /> + item.tag)} />
- : + ) : ( '' - } - { - photo.objectTags.length - ? + )} + {photo.objectTags.length ? (

Objects

    - { - photo.objectTags.map((photoTag, index) => ( -
  • {photoTag.tag.name}
  • - )) - } + {photo.objectTags.map((photoTag, index) => ( +
  • {photoTag.tag.name}
  • + ))} +
+
+ ) : ( + '' + )} + {photo.personTags.length ? ( +
+

People

+
    + {photo.personTags.map((photoTag, index) => ( +
  • {photoTag.tag.name}
  • + ))}
- : + ) : ( '' - } - { - photo.styleTags.length - ? + )} + {photo.styleTags.length ? (

Styles

    - { - photo.styleTags.map((photoTag, index) => ( -
  • {photoTag.tag.name}
  • - )) - } + {photo.styleTags.map((photoTag, index) => ( +
  • {photoTag.tag.name}
  • + ))}
- : + ) : ( '' - } + )}
- + +
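
A quick aside on the arithmetic behind these boxes: PhotoTag positions are stored as normalised centre coordinates, and the BoundingBoxes.js changes above turn them into on-screen pixels. Below is a minimal Python sketch of that mapping; the centring offsets are an assumption for illustration, since only the multiplier and the centre-to-corner maths appear verbatim in the component.

def box_to_pixels(box, photo_w, photo_h, win_w, win_h):
    # Fit the photo inside the window, as BoundingBoxes.js does
    multiplier = win_w / photo_w
    if win_h / photo_h < multiplier:
        multiplier = win_h / photo_h
    display_w = photo_w * multiplier
    display_h = photo_h * multiplier
    # Assumption: the displayed image is centred, so each offset is half the leftover space
    offset_left = (win_w - display_w) / 2
    offset_top = (win_h - display_h) / 2
    width = box['sizeX'] * display_w
    height = box['sizeY'] * display_h
    left = offset_left + box['positionX'] * display_w - width / 2
    top = offset_top + box['positionY'] * display_h - height / 2
    return left, top, width, height

For example, a 4000x3000 photo in a 1920x1080 window gives multiplier 0.36, so a face tag centred at (0.5, 0.5) with size (0.1, 0.1) renders at left=888, top=486, width=144, height=108.
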
diff --git a/ui/src/containers/FiltersContainer.js b/ui/src/containers/FiltersContainer.js index 62956dc1..b895ad9f 100644 --- a/ui/src/containers/FiltersContainer.js +++ b/ui/src/containers/FiltersContainer.js @@ -1,11 +1,10 @@ -import React from 'react' +import React from 'react' import { useQuery } from '@apollo/react-hooks' import { useSelector } from 'react-redux' -import gql from "graphql-tag" +import gql from 'graphql-tag' import Filters from '../components/Filters' import Spinner from '../components/Spinner' - const GET_FILTERS = gql` { allLocationTags { @@ -50,7 +49,7 @@ const GET_FILTERS = gql` } ` -function createFilterSelection(sectionName, data, prefix='tag') { +function createFilterSelection(sectionName, data, prefix = 'tag') { return { name: sectionName, items: data.map((tag) => { @@ -69,8 +68,8 @@ function createFilterSelection(sectionName, data, prefix='tag') { } const FiltersContainer = ({ selectedFilters, onFilterToggle }) => { - const user = useSelector(state => state.user) // Using user here from Redux store so we can wait for any JWT tokens to be refreshed before running GraphQL queries that require authentication - const { loading, error, data } = useQuery(GET_FILTERS, {skip: !user}) + const user = useSelector((state) => state.user) // Using user here from Redux store so we can wait for any JWT tokens to be refreshed before running GraphQL queries that require authentication + const { loading, error, data } = useQuery(GET_FILTERS, { skip: !user }) if (loading) return if (error) return `Error! ${error.message}` @@ -95,48 +94,78 @@ const FiltersContainer = ({ selectedFilters, onFilterToggle }) => { if (data.allCameras.length) { filterData.push({ name: 'Cameras', - items: data.allCameras.map((camera) => ( - {id: 'camera:' + camera.id, name: `${camera.make} ${camera.model}`} - )), + items: data.allCameras.map((camera) => ({ + id: 'camera:' + camera.id, + name: `${camera.make} ${camera.model}`, + })), }) } if (data.allLenses.length) { filterData.push(createFilterSelection('Lenses', data.allLenses, 'lens')) } if (data.allApertures.length) { - filterData.push(createFilterSelection('Aperture', data.allApertures, 'aperture')) + filterData.push( + createFilterSelection('Aperture', data.allApertures, 'aperture') + ) } if (data.allExposures.length) { - filterData.push(createFilterSelection('Exposure', data.allExposures, 'exposure')) + filterData.push( + createFilterSelection('Exposure', data.allExposures, 'exposure') + ) } if (data.allIsoSpeeds.length) { - filterData.push(createFilterSelection('ISO Speed', data.allIsoSpeeds, 'isoSpeed')) + filterData.push( + createFilterSelection('ISO Speed', data.allIsoSpeeds, 'isoSpeed') + ) } if (data.allFocalLengths.length) { - filterData.push(createFilterSelection('Focal Length', data.allFocalLengths, 'focalLength')) + filterData.push( + createFilterSelection( + 'Focal Length', + data.allFocalLengths, + 'focalLength' + ) + ) } filterData.push({ name: 'Rating', items: [ - {id: 'rating:1', name: 1}, - {id: 'rating:2', name: 2}, - {id: 'rating:3', name: 3}, - {id: 'rating:4', name: 4}, - {id: 'rating:5', name: 5} - ] + { id: 'rating:1', name: 1 }, + { id: 'rating:2', name: 2 }, + { id: 'rating:3', name: 3 }, + { id: 'rating:4', name: 4 }, + { id: 'rating:5', name: 5 }, + ], }) filterData.push({ name: 'Flash', - items: [{id: 'flash:on', name: 'On'}, {id: 'flash:off', name: 'Off'}] + items: [ + { id: 'flash:on', name: 'On' }, + { id: 'flash:off', name: 'Off' }, + ], }) if (data.allMeteringModes.length) { - 
filterData.push(createFilterSelection('Metering Mode', data.allMeteringModes, 'meeteringMode')) + filterData.push( + createFilterSelection( + 'Metering Mode', + data.allMeteringModes, + 'meeteringMode' + ) + ) } if (data.allDriveModes.length) { - filterData.push(createFilterSelection('Drive Mode', data.allDriveModes, 'driveMode')) + filterData.push( + createFilterSelection('Drive Mode', data.allDriveModes, 'driveMode') + ) } if (data.allShootingModes.length) { - filterData.push(createFilterSelection('Shooting Mode', data.allShootingModes, 'shootingMode')) + filterData.push( + createFilterSelection( + 'Shooting Mode', + data.allShootingModes, + 'shootingMode' + ) + ) } } diff --git a/ui/src/containers/PhotoDetailContainer.js b/ui/src/containers/PhotoDetailContainer.js index 1eff377d..456804fd 100644 --- a/ui/src/containers/PhotoDetailContainer.js +++ b/ui/src/containers/PhotoDetailContainer.js @@ -1,12 +1,11 @@ -import React, { useEffect,useState,useCallback } from 'react' -import { useQuery,refetch } from '@apollo/react-hooks'; -import gql from "graphql-tag" +import React, { useEffect, useState, useCallback } from 'react' +import { useQuery, refetch } from '@apollo/react-hooks' +import gql from 'graphql-tag' import history from '../history' import PhotoDetail from '../components/PhotoDetail' import Spinner from '../components/Spinner' - const ESCAPE_KEY = 27 const BACKSPACE_KEY = 8 @@ -57,6 +56,16 @@ const GET_PHOTO = gql` sizeX sizeY } + personTags { + id + tag { + name + } + positionX + positionY + sizeX + sizeY + } colorTags { id tag { @@ -78,15 +87,14 @@ const GET_PHOTO = gql` const PhotoDetailContainer = (props) => { const [photo, setPhoto] = useState() - const { loading, error, data,refetch } = useQuery(GET_PHOTO, { + const { loading, error, data, refetch } = useQuery(GET_PHOTO, { variables: { id: props.match.params.photoId, - } + }, }) - useEffect(() => { - const handleKeyDown = event => { + const handleKeyDown = (event) => { switch (event.keyCode) { case ESCAPE_KEY: case BACKSPACE_KEY: @@ -104,18 +112,20 @@ const PhotoDetailContainer = (props) => { } }, []) - useEffect (() => { + useEffect(() => { refetch() - if(!loading && data) { + if (!loading && data) { setPhoto(data) } - },[data]) + }, [data]) if (loading) return if (error) return `Error! 
${error.message}` if (photo && photo.photo) { - return + return ( + + ) } return null } diff --git a/ui/src/static/css/BoundingBoxes.css b/ui/src/static/css/BoundingBoxes.css index dc0a9caa..f9f2cdf5 100644 --- a/ui/src/static/css/BoundingBoxes.css +++ b/ui/src/static/css/BoundingBoxes.css @@ -3,15 +3,25 @@ height: 100%; } .BoundingBoxes .FeatureBox { - border: 3px solid rgba(255,0,0,0.75); + border: 3px solid rgba(255, 0, 0, 0.75); + /* box-shadow: 2px 2px 3px rgba(0, 0, 0, 0.2); */ position: absolute; } .BoundingBoxes .FeatureBox .FeatureLabel { - color: #FFF; + color: #fff; font-size: 14px; - background-color: rgba(255,0,0,0.75); + background-color: rgba(255, 0, 0, 0.75); display: inline-block; overflow: hidden; max-width: 100%; padding: 0 5px 2px 2px; } + +.BoundingBoxes .face { + border-color: rgba(255, 255, 0, 0.75); +} + +.BoundingBoxes .face .FeatureLabel { + color: #000; + background-color: rgba(255, 255, 0, 0.75); +} From 1a1dd31c1b376a4664156eda32d3601cae3b6c9c Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Fri, 22 Jan 2021 18:56:12 +0000 Subject: [PATCH 002/110] PhotoTag extra_data --- photonix/classifiers/face_detection/model.py | 1 - .../migrations/0004_phototag_extra_data.py | 18 ++++++++++++++++++ photonix/photos/models.py | 2 ++ 3 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 photonix/photos/migrations/0004_phototag_extra_data.py diff --git a/photonix/classifiers/face_detection/model.py b/photonix/classifiers/face_detection/model.py index 8e81aca3..2006cba9 100644 --- a/photonix/classifiers/face_detection/model.py +++ b/photonix/classifiers/face_detection/model.py @@ -42,7 +42,6 @@ def load_graph(self, graph_file): def predict(self, image_file, min_score=0.99): image = Image.open(image_file) image = np.asarray(image) - # detector = MTCNN() results = self.graph.detect_faces(image) return list(filter(lambda f: f['confidence'] > min_score, results)) diff --git a/photonix/photos/migrations/0004_phototag_extra_data.py b/photonix/photos/migrations/0004_phototag_extra_data.py new file mode 100644 index 00000000..960ea218 --- /dev/null +++ b/photonix/photos/migrations/0004_phototag_extra_data.py @@ -0,0 +1,18 @@ +# Generated by Django 3.0.7 on 2021-01-22 18:54 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('photos', '0003_auto_20201229_1329'), + ] + + operations = [ + migrations.AddField( + model_name='phototag', + name='extra_data', + field=models.TextField(null=True), + ), + ] diff --git a/photonix/photos/models.py b/photonix/photos/models.py index ddf5a541..90c1df86 100644 --- a/photonix/photos/models.py +++ b/photonix/photos/models.py @@ -245,6 +245,8 @@ class PhotoTag(UUIDModel, VersionedModel): position_y = models.FloatField(null=True) size_x = models.FloatField(null=True) size_y = models.FloatField(null=True) + # A place to store extra JSON data such as face feature positions for eyes, nose and mouth + extra_data = models.TextField(null=True) class Meta: ordering = ['-significance'] From 4d58c5f688e6e02c717539d3dcfc03f3f5009844 Mon Sep 17 00:00:00 2001 From: Gyan P Date: Sat, 17 Apr 2021 14:56:04 +0530 Subject: [PATCH 003/110] completed the task --- .../photos/management/commands/create_user.py | 81 +++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 photonix/photos/management/commands/create_user.py diff --git a/photonix/photos/management/commands/create_user.py b/photonix/photos/management/commands/create_user.py new file mode 100644 index 00000000..a023ec8c --- 
/dev/null
+++ b/photonix/photos/management/commands/create_user.py
@@ -0,0 +1,81 @@
+
+import sys
+from django.contrib.auth import get_user_model
+from django.core.management.base import BaseCommand
+from photonix.photos.models import Library, LibraryUser
+
+User = get_user_model()
+
+
+class Command(BaseCommand):
+    """Management command to create a user and assign them to libraries."""
+
+    help = 'Assign library to user'
+
+    def create_user(self, username, password):
+        """Create a user and assign them to libraries."""
+        if not username:
+            username = input(" Please enter username : ")
+        if User.objects.filter(username=username).exists():
+            print("\n User already exists with username ", username, ". ")
+            self.show_libraries_list(User.objects.get(username=username))
+        else:
+            self.validate_password(username, password)
+
+    def show_libraries_list(self, user):
+        """Method to show the list of libraries."""
+        print("\n Here is the list of libraries.\n ")
+        lib_num_obj_pair_list = []
+        lib_sequence_list = []
+        for count, lib_obj in enumerate(Library.objects.all(), start=1):
+            print(" ", count, " ", lib_obj.name)
+            lib_num_obj_pair_list.append((count, lib_obj))
+            lib_sequence_list.append(count)
+        self.assign_user_to_library(lib_num_obj_pair_list, user, lib_sequence_list)
+
+    def validate_password(self, username, password=None):
+        """Method to validate the password."""
+        if not password:
+            password = input("\n Please enter password : ")
+        if len(password) >= 8:
+            user = User.objects.create(username=username)
+            user.set_password(password)
+            user.save()
+            print(" User created with name ", username)
+            self.show_libraries_list(user)
+        else:
+            print(" Password must be at least 8 characters long!")
+            self.validate_password(username)
+
+    def assign_user_to_library(self, lib_num_obj_pair_list, user, lib_sequence_list):
+        """Method to assign the user to the selected library."""
+        entered_lib_num = input("\n Please enter a library number. ")
+        if not (entered_lib_num.isdigit() and int(entered_lib_num) in lib_sequence_list):
+            print(" You have entered an invalid library number.")
+            self.assign_user_to_library(lib_num_obj_pair_list, user, lib_sequence_list)
+        for sequence_number, obj in lib_num_obj_pair_list:
+            if int(entered_lib_num) == sequence_number:
+                LibraryUser.objects.get_or_create(library=obj, user=user, owner=True)
+                print(" User ", user.username, "assigned to library ", obj.name, "\n")
+        self.continue_the_process(lib_num_obj_pair_list, user, lib_sequence_list)
+
+    def continue_the_process(self, lib_num_obj_pair_list, user, lib_sequence_list):
+        """Method to continue the process if the user should be added to more libraries."""
+        continue_or_not = input(" Do you want to add user to another library? 
Enter Y or N: ") + if continue_or_not.upper() == 'Y': + self.assign_user_to_library(lib_num_obj_pair_list, user, lib_sequence_list) + elif continue_or_not.upper() == 'N': + sys.exit() # we can also write here 'pass' but to avoid unnecessary loop running we used exit() + else: + print(" Please enter only Y or N") + self.continue_the_process(lib_num_obj_pair_list, user, lib_sequence_list) + + def add_arguments(self, parser): + """To pass argumentes in management command.""" + # Optional or named arguments + parser.add_argument('--username', type=str, help='Take username') + parser.add_argument('--password', type=str, help='Take password') + + def handle(self, *args, **options): + """Method in which we call management command with passed arguments.""" + self.create_user(options.get('username'), options.get('password')) From d7c5ae6ca7b93c5659cd005c12d95428507ac302 Mon Sep 17 00:00:00 2001 From: Gyan P Date: Thu, 29 Apr 2021 21:41:49 +0530 Subject: [PATCH 004/110] Add animation scroll functionality using css keyframes. --- ui/src/static/css/Filters.css | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/ui/src/static/css/Filters.css b/ui/src/static/css/Filters.css index f1ead7fe..1fbbfdd1 100644 --- a/ui/src/static/css/Filters.css +++ b/ui/src/static/css/Filters.css @@ -11,8 +11,18 @@ .FiltersContent { width: max-content; - margin: 0 0 0 40px; + margin: 0; transition: opacity 1000ms; + animation: ani linear 2s alternate infinite; + animation-iteration-count:2; + padding-left: 40px; +} +/* For auto scroll. */ +@keyframes ani { + 0% { margin-left: 0; transform: translate3d(0, 0, 0);} + 25% { margin-left: 0; transform: translate3d(0, 0, 0);} + 75% { margin-left: 0; transform: translate3d(0, 0, 0);} + 100% { margin-left: 0; transform: translate3d(-200px, 0, 0);} } .FiltersContent .filterGradient { From 6f42d0041dd9568a30f7872cc9886a2626e9f596 Mon Sep 17 00:00:00 2001 From: Gyan P Date: Sun, 2 May 2021 21:30:21 +0530 Subject: [PATCH 005/110] Update keyframe name for css. --- ui/src/static/css/Filters.css | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/ui/src/static/css/Filters.css b/ui/src/static/css/Filters.css index 1fbbfdd1..7e710477 100644 --- a/ui/src/static/css/Filters.css +++ b/ui/src/static/css/Filters.css @@ -12,13 +12,16 @@ .FiltersContent { width: max-content; margin: 0; + padding-left: 40px; +} + +.AutoAnimation { transition: opacity 1000ms; - animation: ani linear 2s alternate infinite; + animation: autoScroll linear 2s alternate; animation-iteration-count:2; - padding-left: 40px; } /* For auto scroll. */ -@keyframes ani { +@keyframes autoScroll { 0% { margin-left: 0; transform: translate3d(0, 0, 0);} 25% { margin-left: 0; transform: translate3d(0, 0, 0);} 75% { margin-left: 0; transform: translate3d(0, 0, 0);} From 42d84a4b3f599755a2e129909ea651b3a112f26e Mon Sep 17 00:00:00 2001 From: Gyan P Date: Sun, 2 May 2021 21:34:52 +0530 Subject: [PATCH 006/110] Add serachAreaExpanded prop and pass to another component. --- ui/src/components/Browse.js | 2 +- ui/src/components/Search.js | 2 ++ ui/src/containers/FiltersContainer.js | 3 ++- ui/src/containers/SearchContainer.js | 4 +++- 4 files changed, 8 insertions(+), 3 deletions(-) diff --git a/ui/src/components/Browse.js b/ui/src/components/Browse.js index 87431171..56133e6c 100644 --- a/ui/src/components/Browse.js +++ b/ui/src/components/Browse.js @@ -116,7 +116,6 @@ const Browse = ({ 'searchExpanded', window.innerHeight > 850 ? true : false ) - let content = mode === 'MAP' ? 
( @@ -141,6 +140,7 @@ const Browse = ({ onFilterToggle={onFilterToggle} onClearFilters={onClearFilters} updateSearchText={updateSearchText} + searchAreaExpand={expanded} />
diff --git a/ui/src/components/Search.js b/ui/src/components/Search.js index ce460895..12adc477 100644 --- a/ui/src/components/Search.js +++ b/ui/src/components/Search.js @@ -25,6 +25,7 @@ const Search = ({ onClearFilters, search, updateSearchText, + searchAreaExpand }) => { return ( @@ -38,6 +39,7 @@ const Search = ({ ) diff --git a/ui/src/containers/FiltersContainer.js b/ui/src/containers/FiltersContainer.js index 0a9c678b..8765a62c 100644 --- a/ui/src/containers/FiltersContainer.js +++ b/ui/src/containers/FiltersContainer.js @@ -72,7 +72,7 @@ function createFilterSelection(sectionName, data, prefix = 'tag') { } } -const FiltersContainer = ({ selectedFilters, onFilterToggle }) => { +const FiltersContainer = ({ selectedFilters, onFilterToggle, searchAreaExpand }) => { const user = useSelector((state) => state.user) // Using user here from Redux store so we can wait for any JWT tokens to be refreshed before running GraphQL queries that require authentication const activeLibrary = useSelector(getActiveLibrary) let filtersStr = '' @@ -216,6 +216,7 @@ const FiltersContainer = ({ selectedFilters, onFilterToggle }) => { return ( diff --git a/ui/src/containers/SearchContainer.js b/ui/src/containers/SearchContainer.js index bd4a6327..a66ab6c8 100644 --- a/ui/src/containers/SearchContainer.js +++ b/ui/src/containers/SearchContainer.js @@ -9,6 +9,8 @@ export default class SearchContainer extends React.Component { search={this.props.search} onFilterToggle={this.props.onFilterToggle} onClearFilters={this.props.onClearFilters} - updateSearchText={this.props.updateSearchText} /> + updateSearchText={this.props.updateSearchText} + searchAreaExpand={this.props.searchAreaExpand} + /> } } From 910da36c08ec4fe6d727f7ba3df5fb3de29b4c16 Mon Sep 17 00:00:00 2001 From: Gyan P Date: Sun, 2 May 2021 21:36:12 +0530 Subject: [PATCH 007/110] Create a funtion to endAnimation. --- ui/src/components/ScrollArea.js | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/ui/src/components/ScrollArea.js b/ui/src/components/ScrollArea.js index 3b8cdeb6..31364838 100644 --- a/ui/src/components/ScrollArea.js +++ b/ui/src/components/ScrollArea.js @@ -135,9 +135,14 @@ export default class ScrollArea extends React.Component { this.positionViewport() } + // To stop auto scroll animation after one time. + stopScrollAnimation = (e) => { + localStorage.setItem('autoScrollAnimation', true); + } + render = () => ( <> -
+
{this.props.children}
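
Together with the autoScroll keyframes from the two patches above, this handler completes a run-once hint: the filter strip animates until the animation-end event fires, then the persisted flag suppresses it on later visits. A rough Python sketch of that guard follows, with a dict standing in for localStorage; the reading side is an assumption here, since this patch only shows the write.

def should_animate(store):
    # Show the scroll hint only if it has never completed before
    return not store.get('autoScrollAnimation')

def on_animation_end(store):
    # Mirrors stopScrollAnimation: persist the flag after the first run
    store['autoScrollAnimation'] = True

store = {}
assert should_animate(store)
on_animation_end(store)
assert not should_animate(store)
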
From 86de44173011817f372a1882583fb0bda91d3c98 Mon Sep 17 00:00:00 2001
From: Gyan P
Date: Sun, 2 May 2021 21:37:16 +0530
Subject: [PATCH 008/110] Add props and conditionally render a div.

---
 ui/src/components/Filters.js | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/ui/src/components/Filters.js b/ui/src/components/Filters.js
index a4837a71..1e248dc2 100644
--- a/ui/src/components/Filters.js
+++ b/ui/src/components/Filters.js
@@ -22,7 +22,7 @@ const railStyle = {
   backgroundColor: '#484848',
 }
 
-const Filters = ({ data, selectedFilters, onToggle }) => {
+const Filters = ({ data, selectedFilters, onToggle, searchAreaExpand }) => {
   const [values, setValues] = useState({
     'ISO Speed': [],
     'Focal Length': [],
@@ -31,7 +31,6 @@ const Filters = ({ data, selectedFilters, onToggle }) => {
     Rating: [],
   })
   const [isDomainAvail, setIsDomainAvail] = useState(false)
-
   useEffect(() => {
     const vals = []
     data.map((group) => {
@@ -109,7 +108,7 @@ const Filters = ({ data, selectedFilters, onToggle }) => {
   return (
     {isDomainAvail && (
-
+
{data.map((group) => { let items = '' let filterGroupExtraStyles = {} From 8a24e2aed82c4e05381b0f330692e12ec5ce4b93 Mon Sep 17 00:00:00 2001 From: Gyan P Date: Wed, 5 May 2021 15:59:08 +0530 Subject: [PATCH 009/110] implement autocomplete when search string matches availabel tags --- ui/src/components/Search.js | 5 +- ui/src/components/SearchInput.js | 94 ++++++++++++++++++++++- ui/src/containers/AppContainer.js | 4 +- ui/src/containers/FiltersContainer.js | 21 +++-- ui/src/containers/SearchInputContainer.js | 1 + ui/src/static/css/SearchInput.css | 53 ++++++++++++- 6 files changed, 162 insertions(+), 16 deletions(-) diff --git a/ui/src/components/Search.js b/ui/src/components/Search.js index ce460895..f7b3a940 100644 --- a/ui/src/components/Search.js +++ b/ui/src/components/Search.js @@ -1,4 +1,4 @@ -import React from 'react' +import React, { useState } from 'react' import styled from '@emotion/styled' import FiltersContainer from '../containers/FiltersContainer' @@ -26,6 +26,7 @@ const Search = ({ search, updateSearchText, }) => { + const [filters, setFilters] = useState([]) return ( ) diff --git a/ui/src/components/SearchInput.js b/ui/src/components/SearchInput.js index 656a7b83..56628d44 100644 --- a/ui/src/components/SearchInput.js +++ b/ui/src/components/SearchInput.js @@ -1,6 +1,5 @@ -import React from 'react' +import React, { useState, useEffect } from 'react' import PropTypes from 'prop-types' - import '../static/css/SearchInput.css' import { ReactComponent as CloseIcon } from '../static/images/close.svg' import { ReactComponent as ObjectsIcon } from '../static/images/label.svg' @@ -35,10 +34,95 @@ const SearchInput = ({ onFilterToggle, onClearFilters, onSearchTextChange, + filters }) => { + const [activeOption, setActiveOption] = useState(0) + const [filteredOptions, setFilteredOptions] = useState([]) + const [showOptions, setShowOptions] = useState(false) + const [options, setOptions] = useState([]) + + const prepareOptions = () => { + let searchOptions = [] + filters.map(f => { + f.items.map(i => { + i['type'] = f.name + searchOptions.push(i) + return i + }) + return f + }) + setOptions(searchOptions) + } + useEffect(() => { + if (filters.length) prepareOptions() + }, [filters]) + + const handleOnChange = (e) => { + onSearchTextChange(e.target.value) + const userInput = e.currentTarget.value + const filteredOptions = options.filter( + (optionName) => + optionName.name.toLowerCase().indexOf(userInput.toLowerCase()) > -1 + ) + setActiveOption(0) + setFilteredOptions(filteredOptions) + setShowOptions(true) + } + + const onKeyDown = (e) => { + if (e.keyCode === 13) { + onSearchTextChange(filteredOptions[activeOption].name) + setActiveOption(0) + setShowOptions(false) + } else if (e.keyCode === 38) { + if (activeOption === 0) return + setActiveOption(activeOption - 1) + } else if (e.keyCode === 40) { + if (activeOption === filteredOptions.length - 1) return + setActiveOption(activeOption + 1) + } + } + + const handleOnClick = (index) => { + setActiveOption(0) + setFilteredOptions([]) + setShowOptions(false) + onSearchTextChange(filteredOptions[index].name) + } + + let optionList; + if (showOptions && search) { + if (filteredOptions.length) { + optionList = ( +
    + {filteredOptions.map((opt, index) => { + let className + if (index === activeOption) className = 'option-active' + + let icon = React.createElement(GROUP_ICONS[opt.type], { + className: 'groupIcon', + alt: opt.group, + }) + return ( +
  • handleOnClick(index)}> +
    {icon}{opt.name}
    {opt.type} +
  • + ); + })} +
+ ); + } else { + optionList = ( +
+ No Option! +
+ ); + } + } + return (
-
    +
      {selectedFilters.map((filter) => { let icon = React.createElement(GROUP_ICONS[filter.group], { className: 'groupIcon', @@ -58,8 +142,10 @@ const SearchInput = ({ type="text" placeholder="Search" value={search} - onChange={onSearchTextChange} + onChange={handleOnChange} + onKeyDown={onKeyDown} /> + {optionList}
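
Since the substring matching and keyboard handling above are the heart of this autocomplete, here is a rough Python restatement of the same logic for reference (key codes 13, 38 and 40 are Enter, Up and Down; the option shape mirrors the component's state):

def filter_options(options, user_input):
    # Case-insensitive substring match, as in handleOnChange
    needle = user_input.lower()
    return [o for o in options if needle in o['name'].lower()]

def next_active(active, key_code, num_options):
    # Arrow-key navigation clamped at both ends, as in onKeyDown
    if key_code == 38 and active > 0:  # up
        return active - 1
    if key_code == 40 and active < num_options - 1:  # down
        return active + 1
    return active

options = [{'name': 'Beach'}, {'name': 'Bird'}, {'name': 'Bridge'}]
assert [o['name'] for o in filter_options(options, 'bi')] == ['Bird']
assert next_active(0, 38, 3) == 0  # already at the top, stays put
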
diff --git a/ui/src/containers/AppContainer.js b/ui/src/containers/AppContainer.js index c717188d..97ea3e97 100644 --- a/ui/src/containers/AppContainer.js +++ b/ui/src/containers/AppContainer.js @@ -75,8 +75,8 @@ export default class AppContainer extends React.Component { }) } - updateSearchText = (event) => { - this.setState({ search: event.target.value }) + updateSearchText = (value) => { + this.setState({ search: value }) } render = () => { diff --git a/ui/src/containers/FiltersContainer.js b/ui/src/containers/FiltersContainer.js index 0a9c678b..6a88d3ee 100644 --- a/ui/src/containers/FiltersContainer.js +++ b/ui/src/containers/FiltersContainer.js @@ -1,4 +1,4 @@ -import React, { useEffect } from 'react' +import React, { useEffect, useState } from 'react' import { useQuery } from '@apollo/react-hooks' import { useSelector } from 'react-redux' import gql from 'graphql-tag' @@ -72,14 +72,16 @@ function createFilterSelection(sectionName, data, prefix = 'tag') { } } -const FiltersContainer = ({ selectedFilters, onFilterToggle }) => { +const FiltersContainer = ({ selectedFilters, onFilterToggle, setFilters }) => { const user = useSelector((state) => state.user) // Using user here from Redux store so we can wait for any JWT tokens to be refreshed before running GraphQL queries that require authentication + const [isFiltersAvail, setIsFiltersAvail] = useState(false) const activeLibrary = useSelector(getActiveLibrary) + let filterData = [] let filtersStr = '' if (activeLibrary) { filtersStr = `${selectedFilters.map((filter) => filter.id).join(' ')}` } - + const removebleTags = ["Aperture", "Exposure", "ISO Speed", "Focal Length", "Rating", "Flash"] let variables = {} variables = { libraryId: activeLibrary?.id, multiFilter: filtersStr } const { loading, error, data, refetch } = useQuery( @@ -91,9 +93,16 @@ const FiltersContainer = ({ selectedFilters, onFilterToggle }) => { ) useEffect(() => { refetch() - }, [activeLibrary, refetch]) - + + useEffect(() => { + if (isFiltersAvail && filterData.length) { + const autoSuggestionFilters = filterData.filter(f => { + return removebleTags.indexOf(f.name) === -1 + }) + setFilters(autoSuggestionFilters) + } + }, [isFiltersAvail]) const getFilterdData = (type, array) => { const filterArr = selectedFilters.filter((s) => s.group === type) let data = [] @@ -109,7 +118,6 @@ const FiltersContainer = ({ selectedFilters, onFilterToggle }) => { if (loading) return if (error) return `Error! 
${error.message}` - let filterData = [] if (data) { if (data.allGenericTags.length) { filterData.push( @@ -212,6 +220,7 @@ const FiltersContainer = ({ selectedFilters, onFilterToggle }) => { ) ) } + if (!isFiltersAvail) setIsFiltersAvail(true) } return ( ) } diff --git a/ui/src/static/css/SearchInput.css b/ui/src/static/css/SearchInput.css index 6f04367c..69612074 100644 --- a/ui/src/static/css/SearchInput.css +++ b/ui/src/static/css/SearchInput.css @@ -36,8 +36,8 @@ .SearchInput li.filter svg.removeIcon:hover { opacity: 0.6; } -.SearchInput input[type='text'] { - width: 150px; +.SearchInput input { + width: 100%; height: 30px; flex: 1; min-width: 150px; @@ -49,7 +49,7 @@ color: #fff; line-height: 1; } -.SearchInput input[type='text']::placeholder { +.SearchInput input::placeholder { /* Chrome, Firefox, Opera, Safari 10.1+ */ color: rgba(255, 255, 255, 0.6); opacity: 1; /* Firefox */ @@ -64,3 +64,50 @@ .SearchInput svg.clearAll:hover { opacity: 0.6; } + +ul.options { + display: block; + list-style: none; + transition: width 0.3s; + margin: auto; + position: absolute; + top: 100%; + left: 0; + width: 100%; + padding: 0; + background:#484848; + opacity: 0.9; + /*max-height: 200px;*/ + overflow: auto; + overflow-y: scroll; + z-index: 2; +} + +ul.options li { + display: flex; + margin: 0; + padding: 10px; + font-size: 14px; + width: 100%; + transition: 0.3s all; + cursor: pointer; + color: white; + justify-content: space-between; + align-items: center; +} +ul.options li svg { + display: inline-block; + vertical-align: middle; + margin-right: 5px; + filter: invert(0.9); +} +ul.options li:hover { + background-color: #545454; +} + +ul.options li.option-active { + background-color: #545454; +} +.no-options { + color: white; +} From 27d21ca4d55ed758b15059f19927892382d85792 Mon Sep 17 00:00:00 2001 From: Gyan P Date: Thu, 6 May 2021 12:17:00 +0530 Subject: [PATCH 010/110] Backend Changes for event model --- photonix/classifiers/event/__init__.py | 0 photonix/classifiers/event/info.py | 3 ++ photonix/classifiers/event/model.py | 71 ++++++++++++++++++++++++++ photonix/photos/models.py | 1 + photonix/photos/utils/metadata.py | 1 - 5 files changed, 75 insertions(+), 1 deletion(-) create mode 100644 photonix/classifiers/event/__init__.py create mode 100644 photonix/classifiers/event/info.py create mode 100644 photonix/classifiers/event/model.py diff --git a/photonix/classifiers/event/__init__.py b/photonix/classifiers/event/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/photonix/classifiers/event/info.py b/photonix/classifiers/event/info.py new file mode 100644 index 00000000..87213f17 --- /dev/null +++ b/photonix/classifiers/event/info.py @@ -0,0 +1,3 @@ + +name = 'event' +version = 20210505 diff --git a/photonix/classifiers/event/model.py b/photonix/classifiers/event/model.py new file mode 100644 index 00000000..c03a7911 --- /dev/null +++ b/photonix/classifiers/event/model.py @@ -0,0 +1,71 @@ +import operator +import sys +from pathlib import Path +from photonix.photos.utils.metadata import (PhotoMetadata, parse_datetime) +import datetime + + + +class EventModel: + version = 20210505 + approx_ram_mb = 120 + max_num_workers = 2 + + def __init__(self): + self.events = { + 'Christmas Day': '25 December', + 'New Year': '31st December 12:00PM to 1st January 12:00PM', + 'Halloween': '31st October', + "Valentine's Day": '14th February', + } + + def predict(self, image_file): + metadata = PhotoMetadata(image_file) + date_taken = None + possible_date_keys = ['Date/Time Original', 'Date 
Time Original', 'Date/Time', 'Date Time', 'GPS Date/Time', 'Modify Date', 'File Modification Date/Time']
+        for date_key in possible_date_keys:
+            date_taken = parse_datetime(metadata.get(date_key))
+            if date_taken:
+                events = {
+                    datetime.date(date_taken.year, 12, 25): "Christmas Day",
+                    datetime.date(date_taken.year, 10, 31): "Halloween",
+                    datetime.date(date_taken.year, 2, 14): "Valentine's Day",
+                    datetime.date(date_taken.year, 12, 31): "New Year Start",
+                    datetime.date(date_taken.year, 1, 1): "New Year End",
+                }
+                if events.get(date_taken.date()):
+                    if events.get(date_taken.date()).startswith("New Year"):
+                        # TODO: check that the time falls between 31st December 12:00PM and 1st January 12:00PM
+                        return "New Year"
+                    return events.get(date_taken.date())
+        return date_taken
+
+
+def run_on_photo(photo_id):
+    model = EventModel()
+    sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
+    from photonix.classifiers.runners import results_for_model_on_photo, get_or_create_tag
+    photo, results = results_for_model_on_photo(model, photo_id)
+    if photo:
+        from django.utils import timezone
+        from photonix.photos.models import PhotoTag
+        photo.clear_tags(source='C', type='E')
+        for name in results:
+            tag = get_or_create_tag(library=photo.library, name=name, type='C', source='C', ordering=model.colors[name][1])
+            PhotoTag(photo=photo, tag=tag, source='C', confidence=score, significance=score).save()
+        photo.classifier_color_completed_at = timezone.now()
+        photo.classifier_color_version = getattr(model, 'version', 0)
+        photo.save()
+
+    return photo, results
+
+
+if __name__ == '__main__':
+    if len(sys.argv) != 2:
+        print('Argument required: image file path')
+        exit(1)
+
+    _, results = run_on_photo(sys.argv[1])
+
+    for result in results:
+        print('{} (score: {:0.10f})'.format(result[0], result[1]))
diff --git a/photonix/photos/models.py b/photonix/photos/models.py
index ebc1a169..ecd25a3e 100644
--- a/photonix/photos/models.py
+++ b/photonix/photos/models.py
@@ -219,6 +219,7 @@ def base_image_path(self):
     ('C', 'Color'),
     ('S', 'Style'),  # See Karayev et al.: Recognizing Image Style
     ('G', 'Generic'),  # Tags created by user
+    ('E', 'Event'),  # Checks whether the image's taken date falls on a festival date
 )

diff --git a/photonix/photos/utils/metadata.py b/photonix/photos/utils/metadata.py
index 0c45b79a..af3184c2 100644
--- a/photonix/photos/utils/metadata.py
+++ b/photonix/photos/utils/metadata.py
@@ -78,7 +78,6 @@ def get_datetime(path):
     if not matched:
         matched = re.search(r'\D((19|20)[0-9]{2})([0-9]{2})([0-9]{2})\D', fn)
     if matched:
-        # import pdb; pdb.set_trace()
         date_str = '{}-{}-{}'.format(matched.group(1), matched.group(3), matched.group(4))
         return datetime.strptime(date_str, '%Y-%m-%d')
     return None

From 93948df6e298720b603fa12f76e54bf710367bf1 Mon Sep 17 00:00:00 2001
From: Gyan P
Date: Mon, 10 May 2021 19:42:56 +0530
Subject: [PATCH 011/110] Update init and info file.
--- photonix/classifiers/event/__init__.py | 1 + photonix/classifiers/info.py | 1 + 2 files changed, 2 insertions(+) diff --git a/photonix/classifiers/event/__init__.py b/photonix/classifiers/event/__init__.py index e69de29b..9898e544 100644 --- a/photonix/classifiers/event/__init__.py +++ b/photonix/classifiers/event/__init__.py @@ -0,0 +1 @@ +from .model import EventModel, run_on_photo diff --git a/photonix/classifiers/info.py b/photonix/classifiers/info.py index c14650d0..b91c3611 100644 --- a/photonix/classifiers/info.py +++ b/photonix/classifiers/info.py @@ -3,4 +3,5 @@ 'location', 'object', 'style', + 'event', ] From 7aa237eed5bb0c2f7f5c7fd07b39dcc4ae9cc40c Mon Sep 17 00:00:00 2001 From: Gyan P Date: Mon, 10 May 2021 19:43:43 +0530 Subject: [PATCH 012/110] Update date logic and function. --- photonix/classifiers/event/model.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/photonix/classifiers/event/model.py b/photonix/classifiers/event/model.py index c03a7911..7aac1303 100644 --- a/photonix/classifiers/event/model.py +++ b/photonix/classifiers/event/model.py @@ -33,13 +33,15 @@ def predict(self, image_file): datetime.date(date_taken.year, 12, 31): "New Year Start", datetime.date(date_taken.year, 1, 1):"New Year End", } + date_taken = datetime.datetime(date_taken.year, 12, 31, 2, 30) if events.get(date_taken.date()): if events.get(date_taken.date()).startswith("New Year"): - # check lgana h ki 31st December 12:00PM to 1st January 12:00PM 12 pm wala - return "New Year" + start_of_day = datetime.datetime.combine(datetime.date(date_taken.year, 12, 31), datetime.datetime.min.time()) + end_of_day = start_of_day + datetime.timedelta(days=1) + if start_of_day <= date_taken.replace(tzinfo=None) <= end_of_day: + return "New Year" return events.get(date_taken.date()) - return date_taken - + return date_taken def run_on_photo(photo_id): model = EventModel() @@ -67,5 +69,4 @@ def run_on_photo(photo_id): _, results = run_on_photo(sys.argv[1]) - for result in results: - print('{} (score: {:0.10f})'.format(result[0], result[1])) + print(results) \ No newline at end of file From a22f775099cb54d2eaf33c3298ee795b8e5e4192 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Tue, 11 May 2021 21:28:37 +0100 Subject: [PATCH 013/110] Apollo RetryLink tweaks and linting fixes --- ui/src/components/Init.js | 36 +++++++++++++++++++++++--------- ui/src/components/PhotoDetail.js | 6 +++--- 2 files changed, 29 insertions(+), 13 deletions(-) diff --git a/ui/src/components/Init.js b/ui/src/components/Init.js index bdea4552..78d25cf6 100644 --- a/ui/src/components/Init.js +++ b/ui/src/components/Init.js @@ -1,8 +1,15 @@ import React from 'react' import { Provider } from 'react-redux' import { createStore } from 'redux' -import { ApolloClient, ApolloLink, ApolloProvider, from, HttpLink, InMemoryCache } from '@apollo/client' -import { RetryLink } from "@apollo/client/link/retry"; +import { + ApolloClient, + ApolloLink, + ApolloProvider, + from, + HttpLink, + InMemoryCache, +} from '@apollo/client' +import { RetryLink } from '@apollo/client/link/retry' import { Router } from 'react-router-dom' import { ModalContainer } from 'react-router-modal' // import { ThemeProvider, CSSReset } from '@chakra-ui/core' @@ -22,26 +29,35 @@ window.photonix = { } const additiveLink = from([ - new RetryLink(), + new RetryLink({ + delay: { + initial: 500, + max: Infinity, + jitter: true, + }, + attempts: { + max: 30, + }, + }), new ApolloLink((operation, forward) => { return forward(operation).map((data) => 
{ // Raise GraphQL errors as exceptions that trigger RetryLink when re-authentication is in progress if (data && data.errors && data.errors.length > 0) { - throw new Error('GraphQL Operational Error'); + throw new Error('GraphQL Operational Error') } - return data; - }); + return data + }) }), new HttpLink({ uri: '/graphql', credentials: 'same-origin', // Required for older versions of Chromium (~v58) - }) -]); + }), +]) const client = new ApolloClient({ cache: new InMemoryCache(), - link: additiveLink -}); + link: additiveLink, +}) const Init = ({ children }) => { const isMobileApp = navigator.userAgent.indexOf('PhotonixMobileApp') > -1 diff --git a/ui/src/components/PhotoDetail.js b/ui/src/components/PhotoDetail.js index 9c740d5c..ed278627 100644 --- a/ui/src/components/PhotoDetail.js +++ b/ui/src/components/PhotoDetail.js @@ -129,14 +129,14 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { history.push(`/photo/${id}`) setNumHistoryPushes(numHistoryPushes + 1) } - }, [prevNextPhotos]) + }, [prevNextPhotos, numHistoryPushes]) const nextPhoto = useCallback(() => { let id = prevNextPhotos.next[0] if (id) { history.push(`/photo/${id}`) setNumHistoryPushes(numHistoryPushes + 1) } - }, [prevNextPhotos]) + }, [prevNextPhotos, numHistoryPushes]) useEffect(() => { const handleKeyDown = (event) => { @@ -187,7 +187,7 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { onClick={() => { if ( history.length - numHistoryPushes > 2 || - document.referrer != '' + document.referrer !== '' ) { history.go(-(numHistoryPushes + 1)) } else { From 8842c356de642a4df4a7dc42c17d51a6257913da Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Tue, 11 May 2021 22:26:00 +0100 Subject: [PATCH 014/110] Allow setting SECRET_KEY via environment --- photonix/web/settings.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/photonix/web/settings.py b/photonix/web/settings.py index 8428b0c5..6edb27d8 100644 --- a/photonix/web/settings.py +++ b/photonix/web/settings.py @@ -20,7 +20,7 @@ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = str(Path(__file__).parent.parent.resolve()) -SECRET_KEY = utils.get_random_secret_key() +SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', utils.get_random_secret_key()) DEBUG = os.environ.get('ENV', 'prd') != 'prd' From fdbfeb0012f04b097716cf320ecb9751233d3adc Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Tue, 11 May 2021 22:59:01 +0100 Subject: [PATCH 015/110] Attempt to detect 'Error decoding signature' and get user to re-authenticate --- ui/src/components/Init.js | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ui/src/components/Init.js b/ui/src/components/Init.js index 78d25cf6..a0d2e46a 100644 --- a/ui/src/components/Init.js +++ b/ui/src/components/Init.js @@ -18,6 +18,7 @@ import { ThemeProvider, ColorModeProvider } from '@chakra-ui/core' import history from '../history' import reducers from './../stores' import customTheme from '../theme' +import { logOut } from '../auth' export const store = createStore( reducers, @@ -43,6 +44,10 @@ const additiveLink = from([ return forward(operation).map((data) => { // Raise GraphQL errors as exceptions that trigger RetryLink when re-authentication is in progress if (data && data.errors && data.errors.length > 0) { + if (data.errors[0].message === 'Error decoding signature') { + // Probably the Django SECRET_KEY changed so the user needs to re-authenticate. 
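+          // (Assumption, based on the '../auth' import above: logOut() clears
+          // the locally stored JWT, returning the user to the login screen so
+          // fresh tokens get signed with the current SECRET_KEY.)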
+ logOut() + } throw new Error('GraphQL Operational Error') } return data From 112fd3e3bad0770aae8088994758a3e879208d3e Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Wed, 12 May 2021 18:22:53 +0100 Subject: [PATCH 016/110] Consistent secret key generation (fixes GraphQL JWT 'Error decoding signature' errors), non-blocking startup photo scanning --- .../management/commands/rescan_photos.py | 13 +++++--- .../commands/rescan_photos_periodically.py | 12 ++++--- .../management/commands/reset_redis_locks.py | 3 +- photonix/web/settings.py | 4 +-- photonix/web/utils.py | 33 +++++++++++++++++++ system/run.sh | 5 +-- 6 files changed, 53 insertions(+), 17 deletions(-) create mode 100644 photonix/web/utils.py diff --git a/photonix/photos/management/commands/rescan_photos.py b/photonix/photos/management/commands/rescan_photos.py index 99ac025c..745cc2a3 100644 --- a/photonix/photos/management/commands/rescan_photos.py +++ b/photonix/photos/management/commands/rescan_photos.py @@ -1,9 +1,12 @@ +import os + from django.conf import settings from django.core.management.base import BaseCommand +import redis +from redis_lock import Lock from photonix.photos.utils.organise import rescan_photo_libraries from photonix.photos.utils.system import missing_system_dependencies -# from web.utils import notify_ui class Command(BaseCommand): @@ -19,9 +22,9 @@ def rescan_photos(self, paths): exit(1) rescan_photo_libraries(paths) - print('Completed') + print('Rescan complete') def handle(self, *args, **options): - # notify_ui('photo_dirs_scanning', True) - self.rescan_photos(options['paths']) - # notify_ui('photo_dirs_scanning', False) + r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1')) + with Lock(r, 'rescan_photos'): + self.rescan_photos(options['paths']) diff --git a/photonix/photos/management/commands/rescan_photos_periodically.py b/photonix/photos/management/commands/rescan_photos_periodically.py index 2ac184c9..eb7e35cb 100644 --- a/photonix/photos/management/commands/rescan_photos_periodically.py +++ b/photonix/photos/management/commands/rescan_photos_periodically.py @@ -1,6 +1,10 @@ +import os from time import sleep + from django.conf import settings from django.core.management.base import BaseCommand +import redis +from redis_lock import Lock from photonix.photos.utils.organise import rescan_photo_libraries from photonix.photos.utils.system import missing_system_dependencies @@ -22,11 +26,11 @@ def rescan_photos(self, paths): print('Rescan complete') def handle(self, *args, **options): + r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1')) try: while True: - # TODO: Add a lock in here because DB corruption occurs if rescan_photos is called while it's still already running - self.rescan_photos(options['paths']) - - sleep(60 * 15) # Sleep for an hour + with Lock(r, 'rescan_photos'): + self.rescan_photos(options['paths']) + sleep(60 * 60) # Sleep for an hour except KeyboardInterrupt: pass \ No newline at end of file diff --git a/photonix/photos/management/commands/reset_redis_locks.py b/photonix/photos/management/commands/reset_redis_locks.py index c119d316..2ef7cfb5 100644 --- a/photonix/photos/management/commands/reset_redis_locks.py +++ b/photonix/photos/management/commands/reset_redis_locks.py @@ -1,9 +1,8 @@ import os -import redis_lock - from django.core.management.base import BaseCommand import redis +import redis_lock r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1')) diff --git a/photonix/web/settings.py b/photonix/web/settings.py index 6edb27d8..8397662b 100644 
--- a/photonix/web/settings.py
+++ b/photonix/web/settings.py
@@ -14,13 +14,13 @@
 import os
 from pathlib import Path
 
-from django.core.management import utils
+from .utils import get_secret_key
 
 
 # Build paths inside the project like this: os.path.join(BASE_DIR, ...)
 BASE_DIR = str(Path(__file__).parent.parent.resolve())
 
-SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', utils.get_random_secret_key())
+SECRET_KEY = get_secret_key()
 
 DEBUG = os.environ.get('ENV', 'prd') != 'prd'
 
diff --git a/photonix/web/utils.py b/photonix/web/utils.py
new file mode 100644
index 00000000..fbca1f5b
--- /dev/null
+++ b/photonix/web/utils.py
@@ -0,0 +1,33 @@
+import os
+
+from django.core.management import utils
+import redis
+from redis_lock import Lock
+
+
+def get_secret_key():
+    # To avoid each installation having the same Django SECRET_KEY we generate
+    # a random one and store it in Redis. We have to store it somewhere
+    # central like Redis because if each worker generated its own it would
+    # cause problems (like JWT "Error decoding signature").
+
+    secret_key = None
+
+    if 'DJANGO_SECRET_KEY' in os.environ:
+        secret_key = os.environ.get('DJANGO_SECRET_KEY')
+    else:
+        r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1'))
+        if r.exists('django_secret_key'):
+            secret_key = r.get('django_secret_key').decode('utf-8')
+        else:
+            # Make sure only the first worker generates the key and the others get it from Redis
+            with Lock(r, 'django_secret_key_generation_lock'):
+                if r.exists('django_secret_key'):
+                    secret_key = r.get('django_secret_key').decode('utf-8')
+                else:
+                    secret_key = utils.get_random_secret_key()
+                    r.set('django_secret_key', secret_key.encode('utf-8'))
+
+    if not secret_key:
+        raise EnvironmentError('No secret key available')
+    return secret_key
diff --git a/system/run.sh b/system/run.sh
index 8d50b807..eb2f2cc3 100755
--- a/system/run.sh
+++ b/system/run.sh
@@ -22,13 +22,10 @@ if [ "${DEMO}" = "1" ]; then
   python /srv/photonix/manage.py import_demo_photos
 fi
 
->&2 echo "Scanning for new photos"
-python /srv/photonix/manage.py rescan_photos
-
 >&2 echo "Resetting Redis lock"
 python /srv/photonix/manage.py reset_redis_locks
 
->&2 echo "Reschedule any regeneration of thumbnails or analysis jobs"
+>&2 echo "Rescheduling any required upgrade-related tasks"
 python /srv/photonix/manage.py housekeeping
 
 >&2 echo "Starting supervisor"

From a17473c2a7c7a99f1d1ee86af93fc2279be2e155 Mon Sep 17 00:00:00 2001
From: Damian Moore
Date: Wed, 12 May 2021 18:42:24 +0100
Subject: [PATCH 017/110] Avoid trying to use Redis in Docker collectstatic

---
 docker/Dockerfile.prd | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docker/Dockerfile.prd b/docker/Dockerfile.prd
index 26fccc48..80e641e3 100644
--- a/docker/Dockerfile.prd
+++ b/docker/Dockerfile.prd
@@ -130,7 +130,7 @@ COPY system/supervisord.conf /etc/supervisord.conf
 
 ENV PYTHONPATH /srv
 
-RUN python photonix/manage.py collectstatic --noinput --link
+RUN DJANGO_SECRET_KEY=test python photonix/manage.py collectstatic --noinput --link
 
 CMD ./system/run.sh

From 2b363f649a69a73abe45316915167c790a17e084 Mon Sep 17 00:00:00 2001
From: Gyan P
Date: Thu, 20 May 2021 00:21:06 +0530
Subject: [PATCH 018/110] Update history method on backArrow.
--- ui/src/components/PhotoDetail.js | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ui/src/components/PhotoDetail.js b/ui/src/components/PhotoDetail.js index 9c740d5c..b0daff24 100644 --- a/ui/src/components/PhotoDetail.js +++ b/ui/src/components/PhotoDetail.js @@ -103,7 +103,7 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { getPrevNextPhotos(state, photoId) ) const [numHistoryPushes, setNumHistoryPushes] = useState(0) - + // TODO: Bring this back so it doesn't get triggered by someone adding a tag with 'i' in it // useEffect(() => { // const handleKeyDown = (event) => { @@ -189,7 +189,8 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { history.length - numHistoryPushes > 2 || document.referrer != '' ) { - history.go(-(numHistoryPushes + 1)) + history.goBack() + // history.go(-(numHistoryPushes + 1)) } else { history.push('/') } From 81a8f2c3b17868565c3fb428ba6e34f0f7de0971 Mon Sep 17 00:00:00 2001 From: Gyan P Date: Thu, 20 May 2021 00:25:42 +0530 Subject: [PATCH 019/110] Create a mapEvent function and useEffect to update zoom and position of map. --- ui/src/components/MapView.js | 52 +++++++++++++++++++++++++++++------- 1 file changed, 42 insertions(+), 10 deletions(-) diff --git a/ui/src/components/MapView.js b/ui/src/components/MapView.js index 4ec98787..17b3d694 100644 --- a/ui/src/components/MapView.js +++ b/ui/src/components/MapView.js @@ -1,9 +1,9 @@ -import React from 'react' +import React, {useEffect, useState} from 'react' import PropTypes from 'prop-types' -import { Link } from 'react-router-dom' +import { Link, useHistory } from 'react-router-dom' import { MapContainer, Marker, Popup, TileLayer } from 'react-leaflet' import MarkerClusterGroup from 'react-leaflet-markercluster' - +import {useMapEvent} from "react-leaflet"; import '../static/css/Map.css' import 'react-leaflet-markercluster/dist/styles.min.css' // sass @@ -11,7 +11,7 @@ const MapView = ({ photos, bounds, location, - zoom, + // zoom, maxZoom, hideAttribution, }) => { @@ -23,6 +23,36 @@ const MapView = ({ : '© OpenStreetMap contributors' let tileLayer = + const [latState, setLatState] = useState(30) + const [lngState, setLngState] = useState(0) + const [renderState, setRenderState] = useState(2) + const [map, setMap] = useState(null); + const history = useHistory() + + // Use to check the component comes back from next page and setStates. + useEffect(() => { + if (history.action === "POP"){ + setRenderState(parseInt(localStorage.getItem('zoomLevel1'))) + setLatState(localStorage.getItem('lat')) + setLngState(localStorage.getItem('lng')) + } + }, [history]); + + // Use to handle map events and set new position and zoom value to map. + const MapEvents = () => { + const mapEvents = useMapEvent({ + zoomend: () => { + localStorage.setItem('zoomLevel1', mapEvents.getZoom()) + localStorage.setItem('lat', mapEvents.getCenter().lat) + localStorage.setItem('lng', mapEvents.getCenter().lng) + }, + }); + const position = [latState? latState : mapEvents.getCenter().lat, lngState? lngState : mapEvents.getCenter().lng] + const zoom = renderState? renderState : mapEvents.getZoom() + if(map) map.setView(position, zoom); + return null + } + if (photos) { markers = photos.map((photo, idx) => photo.location ? ( @@ -31,7 +61,7 @@ const MapView = ({ position={[photo.location[0], photo.location[1]]} > - + {setMap(map)}} > {tileLayer} + {markers}
@@ -60,7 +92,7 @@ const MapView = ({ return (
- + {tileLayer} {markers} @@ -73,13 +105,13 @@ MapView.propTypes = { photos: PropTypes.array, bounds: PropTypes.func, location: PropTypes.array, - zoom: PropTypes.number, + // zoom: PropTypes.number, maxZoom: PropTypes.number, hideAttribution: PropTypes.bool, } MapView.defaultProps = { - zoom: 2, + // zoom: 2, maxZoom: 15, } From 0ee0174e221f118d6ebbba2024004c5901af9f56 Mon Sep 17 00:00:00 2001 From: Gyan P Date: Thu, 20 May 2021 12:59:39 +0530 Subject: [PATCH 020/110] Update state names. --- ui/src/components/MapView.js | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/ui/src/components/MapView.js b/ui/src/components/MapView.js index 17b3d694..f9d3988a 100644 --- a/ui/src/components/MapView.js +++ b/ui/src/components/MapView.js @@ -25,14 +25,14 @@ const MapView = ({ const [latState, setLatState] = useState(30) const [lngState, setLngState] = useState(0) - const [renderState, setRenderState] = useState(2) + const [zoomState, setZoomState] = useState(2) const [map, setMap] = useState(null); const history = useHistory() - // Use to check the component comes back from next page and setStates. + // Use to check the component comes back from next page or not and setStates. useEffect(() => { if (history.action === "POP"){ - setRenderState(parseInt(localStorage.getItem('zoomLevel1'))) + setZoomState(parseInt(localStorage.getItem('mapZoom'))) setLatState(localStorage.getItem('lat')) setLngState(localStorage.getItem('lng')) } @@ -42,13 +42,13 @@ const MapView = ({ const MapEvents = () => { const mapEvents = useMapEvent({ zoomend: () => { - localStorage.setItem('zoomLevel1', mapEvents.getZoom()) + localStorage.setItem('mapZoom', mapEvents.getZoom()) localStorage.setItem('lat', mapEvents.getCenter().lat) localStorage.setItem('lng', mapEvents.getCenter().lng) }, }); const position = [latState? latState : mapEvents.getCenter().lat, lngState? lngState : mapEvents.getCenter().lng] - const zoom = renderState? renderState : mapEvents.getZoom() + const zoom = zoomState? zoomState : mapEvents.getZoom() if(map) map.setView(position, zoom); return null } @@ -77,7 +77,7 @@ const MapView = ({ {setMap(map)}} > From 3306ddc2612608edc468e06159a1f5b0fa76c0fc Mon Sep 17 00:00:00 2001 From: Gyan P Date: Fri, 21 May 2021 17:22:36 +0530 Subject: [PATCH 021/110] Add an image of an download arrow. --- ui/src/static/images/download_arrow.svg | 1 + 1 file changed, 1 insertion(+) create mode 100644 ui/src/static/images/download_arrow.svg diff --git a/ui/src/static/images/download_arrow.svg b/ui/src/static/images/download_arrow.svg new file mode 100644 index 00000000..e40eaee8 --- /dev/null +++ b/ui/src/static/images/download_arrow.svg @@ -0,0 +1 @@ + \ No newline at end of file From 295c80de96acc4fc8e296b04e28d16940aebf5b1 Mon Sep 17 00:00:00 2001 From: Gyan P Date: Fri, 21 May 2021 17:26:08 +0530 Subject: [PATCH 022/110] Add an axios and create a feature to download an image. 
--- ui/package.json | 1 + ui/src/components/PhotoDetail.js | 38 ++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/ui/package.json b/ui/package.json index 291f8a1a..301fbf53 100644 --- a/ui/package.json +++ b/ui/package.json @@ -11,6 +11,7 @@ "@testing-library/react": "^9.3.2", "@testing-library/user-event": "^7.1.2", "apollo-link-retry": "^2.2.15", + "axios": "^0.21.1", "emotion-theming": "^10.0.27", "graphql": "^15.5.0", "history": "^4.10.1", diff --git a/ui/src/components/PhotoDetail.js b/ui/src/components/PhotoDetail.js index 9c740d5c..11955461 100644 --- a/ui/src/components/PhotoDetail.js +++ b/ui/src/components/PhotoDetail.js @@ -9,11 +9,13 @@ import PhotoMetadata from './PhotoMetadata' import { getSafeArea } from '../stores/layout/selector' import { getPrevNextPhotos } from '../stores/photos/selector' +import { ReactComponent as DownloadIcon } from '../static/images/download_arrow.svg' import { ReactComponent as ArrowBackIcon } from '../static/images/arrow_back.svg' import { ReactComponent as ArrowLeftIcon } from '../static/images/arrow_left.svg' import { ReactComponent as ArrowRightIcon } from '../static/images/arrow_right.svg' import { ReactComponent as InfoIcon } from '../static/images/info.svg' import { ReactComponent as CloseIcon } from '../static/images/close.svg' +import axios from 'axios'; // const I_KEY = 73 const LEFT_KEY = 37 @@ -76,6 +78,14 @@ const Container = styled('div')` cursor: pointer; z-index: 10; } + .showDownloadIcon { + position: absolute; + right: 50px; + top: 10px; + filter: invert(0.9); + cursor: pointer; + z-index: 10; + } /* When two boxes can no longer fit next to each other */ @media all and (max-width: 500px) { @@ -168,6 +178,25 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { sizeY: objectTag.sizeY, } }) + + // Use to download an image when user click on download button. + const downloadImage = () => { + axios({ + url: `/thumbnailer/photo/3840x3840_contain_q75/${photoId}/`, + method: "GET", + responseType: "blob" // important + }).then(response => { + const url = window.URL.createObjectURL(new Blob([response.data])); + const link = document.createElement("a"); + link.href = url; + link.setAttribute( + "download", + `${photo.baseFilePath.split(/[\\\/]/).pop()}` + ); + document.body.appendChild(link); + link.click(); + }); + } return ( @@ -224,6 +253,7 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { updatePhotoFile={updatePhotoFile} /> )} + {!showMetadata ? 
( { // title="Press [I] key to show/hide photo details" /> )} + + ) } From 6985b4cb7cace3bd04bdf7c7bf2067783ab16a20 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Wed, 26 May 2021 11:22:18 +0100 Subject: [PATCH 023/110] Changed to calculate full download URL --- photonix/photos/models.py | 9 ++++++ photonix/photos/schema.py | 4 +++ ui/package.json | 1 - ui/src/components/PhotoDetail.js | 38 ++++++----------------- ui/src/containers/PhotoDetailContainer.js | 1 + 5 files changed, 24 insertions(+), 29 deletions(-) diff --git a/photonix/photos/models.py b/photonix/photos/models.py index f3f66296..fbb1ddcd 100644 --- a/photonix/photos/models.py +++ b/photonix/photos/models.py @@ -37,6 +37,9 @@ def rescan(self): for library_path in self.paths: library_path.rescan() + def get_library_path_store(self): + return self.paths.filter(type='St')[0] + LIBRARY_PATH_TYPE_CHOICES = ( ('St', 'Store'), @@ -163,6 +166,12 @@ def base_file(self): def base_image_path(self): return self.base_file.base_image_path + @property + def download_url(self): + library_url = self.library.get_library_path_store().url + library_path = self.library.get_library_path_store().path + return self.base_file.path.replace(library_path, library_url) + @property def dimensions(self): file = self.base_file diff --git a/photonix/photos/schema.py b/photonix/photos/schema.py index 8ce10133..a8e6b372 100644 --- a/photonix/photos/schema.py +++ b/photonix/photos/schema.py @@ -70,6 +70,7 @@ class PhotoNode(DjangoObjectType): photo_file = graphene.List(PhotoFileType) base_file_path = graphene.String() base_file_id = graphene.UUID() + download_url = graphene.String() class Meta: model = Photo @@ -114,6 +115,9 @@ def resolve_base_file_path(self, info): def resolve_base_file_id(self, info): return self.base_file.id + def resolve_download_url(self, info): + return self.download_url + class PhotoFilter(django_filters.FilterSet): multi_filter = CharFilter(method='multi_filter_filter') diff --git a/ui/package.json b/ui/package.json index 301fbf53..291f8a1a 100644 --- a/ui/package.json +++ b/ui/package.json @@ -11,7 +11,6 @@ "@testing-library/react": "^9.3.2", "@testing-library/user-event": "^7.1.2", "apollo-link-retry": "^2.2.15", - "axios": "^0.21.1", "emotion-theming": "^10.0.27", "graphql": "^15.5.0", "history": "^4.10.1", diff --git a/ui/src/components/PhotoDetail.js b/ui/src/components/PhotoDetail.js index 11955461..0a23ecd5 100644 --- a/ui/src/components/PhotoDetail.js +++ b/ui/src/components/PhotoDetail.js @@ -15,7 +15,6 @@ import { ReactComponent as ArrowLeftIcon } from '../static/images/arrow_left.svg import { ReactComponent as ArrowRightIcon } from '../static/images/arrow_right.svg' import { ReactComponent as InfoIcon } from '../static/images/info.svg' import { ReactComponent as CloseIcon } from '../static/images/close.svg' -import axios from 'axios'; // const I_KEY = 73 const LEFT_KEY = 37 @@ -178,25 +177,6 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { sizeY: objectTag.sizeY, } }) - - // Use to download an image when user click on download button. 
-  const downloadImage = () => {
-    axios({
-      url: `/thumbnailer/photo/3840x3840_contain_q75/${photoId}/`,
-      method: "GET",
-      responseType: "blob" // important: fetch as binary so a Blob URL can be created
-    }).then(response => {
-      const url = window.URL.createObjectURL(new Blob([response.data]));
-      const link = document.createElement("a");
-      link.href = url;
-      link.setAttribute(
-        "download",
-        `${photo.baseFilePath.split(/[\\\/]/).pop()}`
-      );
-      document.body.appendChild(link);
-      link.click();
-    });
-  }

   return (
@@ -273,14 +253,16 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => {
           // title="Press [I] key to show/hide photo details"
         />
       )}
-
-
+      {photo?.downloadUrl && (
+
+
+
+      )}
   )
}
diff --git a/ui/src/containers/PhotoDetailContainer.js b/ui/src/containers/PhotoDetailContainer.js
index ea920071..350b7d38 100644
--- a/ui/src/containers/PhotoDetailContainer.js
+++ b/ui/src/containers/PhotoDetailContainer.js
@@ -80,6 +80,7 @@ const GET_PHOTO = gql`
       }
       baseFileId
       baseFilePath
+      downloadUrl
       width
       height
     }

From a88424de452f2742b4eb510fce426e848c1775b8 Mon Sep 17 00:00:00 2001
From: Damian Moore
Date: Wed, 26 May 2021 13:02:24 +0100
Subject: [PATCH 024/110] Import demo library changes and reduce number of thumbnail workers

---
 .../photos/management/commands/import_demo_photos.py  | 11 +++++++----
 .../photos/management/commands/thumbnail_processor.py |  2 +-
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/photonix/photos/management/commands/import_demo_photos.py b/photonix/photos/management/commands/import_demo_photos.py
index 96157af2..9cdf1a33 100644
--- a/photonix/photos/management/commands/import_demo_photos.py
+++ b/photonix/photos/management/commands/import_demo_photos.py
@@ -45,11 +45,14 @@ def import_photos(self):
         # Create Library
         library, _ = Library.objects.get_or_create(
             name='Demo Library',
-            # base_thumbnail_path='/data/cache/thumbnails/',
-            # base_thumbnail_url='/thumbnails/'
+            classification_color_enabled=True,
+            classification_location_enabled=True,
+            classification_style_enabled=True,
+            classification_object_enabled=True,
+            setup_stage_completed='Th'
         )
         # LibraryPath as locally mounted volume
-        library_path, _ = LibraryPath.objects.get_or_create(
+        LibraryPath.objects.get_or_create(
             library=library,
             type='St',
             backend_type='Lo',
@@ -61,7 +64,7 @@ def import_photos(self):
         # In dev environment user needs to be owner to access all functionality
-        # but demo.photonix.org this could lead to the system being messed up
+        # but on demo.photonix.org this could lead to the system being messed up
         owner = os.environ.get('ENV') == 'dev'
-        library_user, _ = LibraryUser.objects.get_or_create(
+        LibraryUser.objects.get_or_create(
             library=library,
             user=user,
             owner=owner
diff --git a/photonix/photos/management/commands/thumbnail_processor.py b/photonix/photos/management/commands/thumbnail_processor.py
index c2eb3d92..f2b0e748 100644
--- a/photonix/photos/management/commands/thumbnail_processor.py
+++ b/photonix/photos/management/commands/thumbnail_processor.py
@@ -28,7 +28,7 @@ class Command(BaseCommand):
     help = 'Processes full-sized photos into thumbnails of various sizes.'
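    # Uses at most a quarter of the available CPU cores (and at least one)
    # for the pool of thumbnail processing threads started below.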
def run_processors(self): - num_workers = cpu_count() + num_workers = max(int(cpu_count() / 4), 1) threads = [] print('Starting {} thumbnail processor workers\n'.format(num_workers)) From edb77fd4a5259c1ce7cadbcb66d34e2656f157c2 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Wed, 26 May 2021 18:50:15 +0100 Subject: [PATCH 025/110] Minor tweaks --- ui/src/components/Filters.js | 13 +++--- ui/src/components/ScrollArea.js | 71 +++++++++++++++++++++++---------- ui/src/static/css/Filters.css | 32 ++++++++------- 3 files changed, 76 insertions(+), 40 deletions(-) diff --git a/ui/src/components/Filters.js b/ui/src/components/Filters.js index 1e248dc2..3b9b6142 100644 --- a/ui/src/components/Filters.js +++ b/ui/src/components/Filters.js @@ -108,7 +108,13 @@ const Filters = ({ data, selectedFilters, onToggle, searchAreaExpand }) => { return ( {isDomainAvail && ( -
+
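{/* Render a section for each filter group: the group heading followed by its items */}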
{data.map((group) => { let items = '' let filterGroupExtraStyles = {} @@ -221,10 +227,7 @@ const Filters = ({ data, selectedFilters, onToggle, searchAreaExpand }) => { return ( {showTagSection(items, group.name) && ( -
+

{group.name}

{items}
diff --git a/ui/src/components/ScrollArea.js b/ui/src/components/ScrollArea.js index 31364838..318b6ebe 100644 --- a/ui/src/components/ScrollArea.js +++ b/ui/src/components/ScrollArea.js @@ -1,6 +1,5 @@ import React from 'react' - export default class ScrollArea extends React.Component { constructor(props) { super(props) @@ -40,12 +39,17 @@ export default class ScrollArea extends React.Component { componentDidUpdate = () => { this.init() - if (!this.initialised && this.containerRef.current && this.scrollbarHandleRef.current) { + if ( + !this.initialised && + this.containerRef.current && + this.scrollbarHandleRef.current + ) { this.forceUpdate(this.init()) - } - else if (!this.initialised) { + } else if (!this.initialised) { // Occasionally we get refs before the painting has completed so we have to force an update - setTimeout(() => {this.forceUpdate()}, 100) + setTimeout(() => { + this.forceUpdate() + }, 100) } } @@ -63,11 +67,16 @@ export default class ScrollArea extends React.Component { } if (this.containerRef.current) { - this.contentWidth = this.containerRef.current.firstChild.clientWidth + this.padding - this.contentViewWidth = this.containerRef.current.clientWidth + (2 * this.padding) - this.contentScrollRange = this.contentWidth - this.contentViewWidth + (2 * this.padding) - this.scrollbarWidth = this.containerRef.current.parentElement.clientWidth - (2 * this.padding) - this.scrollbarScrollRange = this.scrollbarWidth - this.scrollbarHandleWidth + this.contentWidth = + this.containerRef.current.firstChild.clientWidth + this.padding + this.contentViewWidth = + this.containerRef.current.clientWidth + 2 * this.padding + this.contentScrollRange = + this.contentWidth - this.contentViewWidth + 2 * this.padding + this.scrollbarWidth = + this.containerRef.current.parentElement.clientWidth - 2 * this.padding + this.scrollbarScrollRange = + this.scrollbarWidth - this.scrollbarHandleWidth } } @@ -75,16 +84,23 @@ export default class ScrollArea extends React.Component { if (this.containerRef.current) { this.contentOffset = this.containerRef.current.scrollLeft this.scrollProgress = this.contentOffset / this.contentScrollRange - this.scrollbarLeft = parseInt((this.padding) + (this.scrollProgress * this.scrollbarScrollRange), 10) + this.scrollbarLeft = parseInt( + this.padding + this.scrollProgress * this.scrollbarScrollRange, + 10 + ) this.scrollbarHandleRef.current.style.left = this.scrollbarLeft + 'px' - this.scrollbarHandleRef.current.style.width = this.scrollbarHandleWidth + 'px' + this.scrollbarHandleRef.current.style.width = + this.scrollbarHandleWidth + 'px' this.initialised = true } } positionViewport = () => { this.scrollProgress = this.dragOffset / this.scrollbarScrollRange - this.contentLeft = parseInt(this.scrollProgress * this.contentScrollRange, 10) + this.contentLeft = parseInt( + this.scrollProgress * this.contentScrollRange, + 10 + ) this.containerRef.current.scrollLeft = this.contentLeft this.positionScrollbar() } @@ -100,7 +116,7 @@ export default class ScrollArea extends React.Component { document.onmouseup = this.scrollbarRelease document.onmousemove = this.scrollbarDrag if (!this.state.displayScrollbar) { - this.setState({displayScrollbar: true}) + this.setState({ displayScrollbar: true }) } } @@ -121,30 +137,45 @@ export default class ScrollArea extends React.Component { document.onmousemove = null document.ontouchend = null document.ontouchmove = null - this.setState({displayScrollbar: false}) + this.setState({ displayScrollbar: false }) } scrollbarDrag = (e) => { 
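    // Convert the mouse position into an offset for the scrollbar handle,
    // then scroll the content to the matching position.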
e.preventDefault()
-    this.dragOffset = e.clientX - (this.mouseDownStart - this.scrollbarStart) - this.padding
+    this.dragOffset =
+      e.clientX - (this.mouseDownStart - this.scrollbarStart) - this.padding
     this.positionViewport()
   }

   scrollbarDragTouch = (e) => {
-    this.dragOffset = e.touches[0].clientX - (this.mouseDownStart - this.scrollbarStart) - this.padding
+    this.dragOffset =
+      e.touches[0].clientX -
+      (this.mouseDownStart - this.scrollbarStart) -
+      this.padding
     this.positionViewport()
   }

-  // To stop auto scroll animation after one time.
+  // Records that the filters peek animation has played so it only runs once.
   stopScrollAnimation = (e) => {
-    localStorage.setItem('autoScrollAnimation', true);
+    localStorage.setItem('filtersPeeked', true)
   }

   render = () => (
     <>
-
+
{this.props.children} -
+
) diff --git a/ui/src/static/css/Filters.css b/ui/src/static/css/Filters.css index 7e710477..c9f08250 100644 --- a/ui/src/static/css/Filters.css +++ b/ui/src/static/css/Filters.css @@ -6,26 +6,28 @@ } .FiltersContent { width: max-content; - margin: 0 0 0 40px; -} - -.FiltersContent { - width: max-content; - margin: 0; padding-left: 40px; } -.AutoAnimation { +.PeekAnimation { transition: opacity 1000ms; - animation: autoScroll linear 2s alternate; - animation-iteration-count:2; + animation: autoPeek ease-in-out 2s alternate; + animation-iteration-count: 2; } /* For auto scroll. */ -@keyframes autoScroll { - 0% { margin-left: 0; transform: translate3d(0, 0, 0);} - 25% { margin-left: 0; transform: translate3d(0, 0, 0);} - 75% { margin-left: 0; transform: translate3d(0, 0, 0);} - 100% { margin-left: 0; transform: translate3d(-200px, 0, 0);} +@keyframes autoPeek { + 0% { + margin-left: 0; + transform: translate3d(0, 0, 0); + } + 75% { + margin-left: 0; + transform: translate3d(0, 0, 0); + } + 100% { + margin-left: 0; + transform: translate3d(-200px, 0, 0); + } } .FiltersContent .filterGradient { @@ -147,7 +149,7 @@ margin: 0 -20px -20px; } .FiltersContent { - margin-left: 30px; + padding-left: 30px; } .FilterGroup { width: 180px; From 10d695006522fd9c3fd5daec4d93ca04092d7083 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Thu, 27 May 2021 14:30:23 +0100 Subject: [PATCH 026/110] Fixes MTCNN Python warning and line endings --- .../face_detection/mtcnn/__init__.py | 60 +- .../mtcnn/exceptions/__init__.py | 52 +- .../mtcnn/exceptions/invalid_image.py | 60 +- .../classifiers/face_detection/mtcnn/mtcnn.py | 1000 ++++++++--------- .../face_detection/mtcnn/network/__init__.py | 48 +- .../face_detection/mtcnn/network/factory.py | 262 ++--- 6 files changed, 741 insertions(+), 741 deletions(-) diff --git a/photonix/classifiers/face_detection/mtcnn/__init__.py b/photonix/classifiers/face_detection/mtcnn/__init__.py index 04f627bf..80f5d4cd 100644 --- a/photonix/classifiers/face_detection/mtcnn/__init__.py +++ b/photonix/classifiers/face_detection/mtcnn/__init__.py @@ -1,30 +1,30 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- - -# MIT License -# -# Copyright (c) 2019 Iván de Paz Centeno -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
- -from .mtcnn import MTCNN - - -__author__ = "Iván de Paz Centeno" -__version__= "0.1.0" +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# MIT License +# +# Copyright (c) 2019 Iván de Paz Centeno +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from .mtcnn import MTCNN + + +__author__ = "Iván de Paz Centeno" +__version__= "0.1.0" diff --git a/photonix/classifiers/face_detection/mtcnn/exceptions/__init__.py b/photonix/classifiers/face_detection/mtcnn/exceptions/__init__.py index dceae345..efe2ac45 100644 --- a/photonix/classifiers/face_detection/mtcnn/exceptions/__init__.py +++ b/photonix/classifiers/face_detection/mtcnn/exceptions/__init__.py @@ -1,26 +1,26 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- - -# MIT License -# -# Copyright (c) 2019 Iván de Paz Centeno -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
- -from .invalid_image import InvalidImage +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# MIT License +# +# Copyright (c) 2019 Iván de Paz Centeno +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from .invalid_image import InvalidImage diff --git a/photonix/classifiers/face_detection/mtcnn/exceptions/invalid_image.py b/photonix/classifiers/face_detection/mtcnn/exceptions/invalid_image.py index fbb558ef..ecfe9bc5 100755 --- a/photonix/classifiers/face_detection/mtcnn/exceptions/invalid_image.py +++ b/photonix/classifiers/face_detection/mtcnn/exceptions/invalid_image.py @@ -1,30 +1,30 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- - -# MIT License -# -# Copyright (c) 2019 Iván de Paz Centeno -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
- - -__author__ = "Iván de Paz Centeno" - -class InvalidImage(Exception): - pass +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# MIT License +# +# Copyright (c) 2019 Iván de Paz Centeno +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + + +__author__ = "Iván de Paz Centeno" + +class InvalidImage(Exception): + pass diff --git a/photonix/classifiers/face_detection/mtcnn/mtcnn.py b/photonix/classifiers/face_detection/mtcnn/mtcnn.py index 6702e264..b1f82311 100644 --- a/photonix/classifiers/face_detection/mtcnn/mtcnn.py +++ b/photonix/classifiers/face_detection/mtcnn/mtcnn.py @@ -1,500 +1,500 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- - -# MIT License -# -# Copyright (c) 2019 Iván de Paz Centeno -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -# -# This code is derived from the MTCNN implementation of David Sandberg for Facenet -# (https://github.com/davidsandberg/facenet/) -# It has been rebuilt from scratch, taking the David Sandberg's implementation as a reference. 
-# - -# import cv2 -import numpy as np -from PIL import Image -import pkg_resources - -from .exceptions import InvalidImage -from .network.factory import NetworkFactory - -__author__ = "Iván de Paz Centeno" - - -class StageStatus(object): - """ - Keeps status between MTCNN stages - """ - - def __init__(self, pad_result: tuple = None, width=0, height=0): - self.width = width - self.height = height - self.dy = self.edy = self.dx = self.edx = self.y = self.ey = self.x = self.ex = self.tmpw = self.tmph = [] - - if pad_result is not None: - self.update(pad_result) - - def update(self, pad_result: tuple): - s = self - s.dy, s.edy, s.dx, s.edx, s.y, s.ey, s.x, s.ex, s.tmpw, s.tmph = pad_result - - -class MTCNN(object): - """ - Allows to perform MTCNN Detection -> - a) Detection of faces (with the confidence probability) - b) Detection of keypoints (left eye, right eye, nose, mouth_left, mouth_right) - """ - - def __init__(self, weights_file: str = None, min_face_size: int = 20, steps_threshold: list = None, - scale_factor: float = 0.709): - """ - Initializes the MTCNN. - :param weights_file: file uri with the weights of the P, R and O networks from MTCNN. By default it will load - the ones bundled with the package. - :param min_face_size: minimum size of the face to detect - :param steps_threshold: step's thresholds values - :param scale_factor: scale factor - """ - if steps_threshold is None: - steps_threshold = [0.6, 0.7, 0.7] - - if weights_file is None: - weights_file = pkg_resources.resource_stream('mtcnn', 'data/mtcnn_weights.npy') - - self._min_face_size = min_face_size - self._steps_threshold = steps_threshold - self._scale_factor = scale_factor - - self._pnet, self._rnet, self._onet = NetworkFactory().build_P_R_O_nets_from_file(weights_file) - - @property - def min_face_size(self): - return self._min_face_size - - @min_face_size.setter - def min_face_size(self, mfc=20): - try: - self._min_face_size = int(mfc) - except ValueError: - self._min_face_size = 20 - - def __compute_scale_pyramid(self, m, min_layer): - scales = [] - factor_count = 0 - - while min_layer >= 12: - scales += [m * np.power(self._scale_factor, factor_count)] - min_layer = min_layer * self._scale_factor - factor_count += 1 - - return scales - - @staticmethod - def __scale_image(image, scale: float): - """ - Scales the image to a given scale. 
- :param image: - :param scale: - :return: - """ - height, width, _ = image.shape - - width_scaled = int(np.ceil(width * scale)) - height_scaled = int(np.ceil(height * scale)) - - # im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation = cv2.INTER_AREA) - im_data = Image.fromarray(image).resize((width_scaled, height_scaled), Image.BICUBIC) - im_data = np.asarray(im_data) - - # Normalize the image's pixels - im_data_normalized = (im_data - 127.5) * 0.0078125 - - return im_data_normalized - - @staticmethod - def __generate_bounding_box(imap, reg, scale, t): - - # use heatmap to generate bounding boxes - stride = 2 - cellsize = 12 - - imap = np.transpose(imap) - dx1 = np.transpose(reg[:, :, 0]) - dy1 = np.transpose(reg[:, :, 1]) - dx2 = np.transpose(reg[:, :, 2]) - dy2 = np.transpose(reg[:, :, 3]) - - y, x = np.where(imap >= t) - - if y.shape[0] == 1: - dx1 = np.flipud(dx1) - dy1 = np.flipud(dy1) - dx2 = np.flipud(dx2) - dy2 = np.flipud(dy2) - - score = imap[(y, x)] - reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])) - - if reg.size == 0: - reg = np.empty(shape=(0, 3)) - - bb = np.transpose(np.vstack([y, x])) - - q1 = np.fix((stride * bb + 1) / scale) - q2 = np.fix((stride * bb + cellsize) / scale) - boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg]) - - return boundingbox, reg - - @staticmethod - def __nms(boxes, threshold, method): - """ - Non Maximum Suppression. - - :param boxes: np array with bounding boxes. - :param threshold: - :param method: NMS method to apply. Available values ('Min', 'Union') - :return: - """ - if boxes.size == 0: - return np.empty((0, 3)) - - x1 = boxes[:, 0] - y1 = boxes[:, 1] - x2 = boxes[:, 2] - y2 = boxes[:, 3] - s = boxes[:, 4] - - area = (x2 - x1 + 1) * (y2 - y1 + 1) - sorted_s = np.argsort(s) - - pick = np.zeros_like(s, dtype=np.int16) - counter = 0 - while sorted_s.size > 0: - i = sorted_s[-1] - pick[counter] = i - counter += 1 - idx = sorted_s[0:-1] - - xx1 = np.maximum(x1[i], x1[idx]) - yy1 = np.maximum(y1[i], y1[idx]) - xx2 = np.minimum(x2[i], x2[idx]) - yy2 = np.minimum(y2[i], y2[idx]) - - w = np.maximum(0.0, xx2 - xx1 + 1) - h = np.maximum(0.0, yy2 - yy1 + 1) - - inter = w * h - - if method is 'Min': - o = inter / np.minimum(area[i], area[idx]) - else: - o = inter / (area[i] + area[idx] - inter) - - sorted_s = sorted_s[np.where(o <= threshold)] - - pick = pick[0:counter] - - return pick - - @staticmethod - def __pad(total_boxes, w, h): - # compute the padding coordinates (pad the bounding boxes to square) - tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32) - tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32) - numbox = total_boxes.shape[0] - - dx = np.ones(numbox, dtype=np.int32) - dy = np.ones(numbox, dtype=np.int32) - edx = tmpw.copy().astype(np.int32) - edy = tmph.copy().astype(np.int32) - - x = total_boxes[:, 0].copy().astype(np.int32) - y = total_boxes[:, 1].copy().astype(np.int32) - ex = total_boxes[:, 2].copy().astype(np.int32) - ey = total_boxes[:, 3].copy().astype(np.int32) - - tmp = np.where(ex > w) - edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1) - ex[tmp] = w - - tmp = np.where(ey > h) - edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1) - ey[tmp] = h - - tmp = np.where(x < 1) - dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1) - x[tmp] = 1 - - tmp = np.where(y < 1) - dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1) - y[tmp] = 1 - - return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph - - @staticmethod - def __rerec(bbox): - # 
convert bbox to square - height = bbox[:, 3] - bbox[:, 1] - width = bbox[:, 2] - bbox[:, 0] - max_side_length = np.maximum(width, height) - bbox[:, 0] = bbox[:, 0] + width * 0.5 - max_side_length * 0.5 - bbox[:, 1] = bbox[:, 1] + height * 0.5 - max_side_length * 0.5 - bbox[:, 2:4] = bbox[:, 0:2] + np.transpose(np.tile(max_side_length, (2, 1))) - return bbox - - @staticmethod - def __bbreg(boundingbox, reg): - # calibrate bounding boxes - if reg.shape[1] == 1: - reg = np.reshape(reg, (reg.shape[2], reg.shape[3])) - - w = boundingbox[:, 2] - boundingbox[:, 0] + 1 - h = boundingbox[:, 3] - boundingbox[:, 1] + 1 - b1 = boundingbox[:, 0] + reg[:, 0] * w - b2 = boundingbox[:, 1] + reg[:, 1] * h - b3 = boundingbox[:, 2] + reg[:, 2] * w - b4 = boundingbox[:, 3] + reg[:, 3] * h - boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4])) - return boundingbox - - def detect_faces(self, img) -> list: - """ - Detects bounding boxes from the specified image. - :param img: image to process - :return: list containing all the bounding boxes detected with their keypoints. - """ - if img is None or not hasattr(img, "shape"): - raise InvalidImage("Image not valid.") - - height, width, _ = img.shape - stage_status = StageStatus(width=width, height=height) - - m = 12 / self._min_face_size - min_layer = np.amin([height, width]) * m - - scales = self.__compute_scale_pyramid(m, min_layer) - - stages = [self.__stage1, self.__stage2, self.__stage3] - result = [scales, stage_status] - - # We pipe here each of the stages - for stage in stages: - result = stage(img, result[0], result[1]) - - [total_boxes, points] = result - - bounding_boxes = [] - - for bounding_box, keypoints in zip(total_boxes, points.T): - x = max(0, int(bounding_box[0])) - y = max(0, int(bounding_box[1])) - width = int(bounding_box[2] - x) - height = int(bounding_box[3] - y) - bounding_boxes.append({ - 'box': [x, y, width, height], - 'confidence': bounding_box[-1], - 'keypoints': { - 'left_eye': (int(keypoints[0]), int(keypoints[5])), - 'right_eye': (int(keypoints[1]), int(keypoints[6])), - 'nose': (int(keypoints[2]), int(keypoints[7])), - 'mouth_left': (int(keypoints[3]), int(keypoints[8])), - 'mouth_right': (int(keypoints[4]), int(keypoints[9])), - } - }) - - return bounding_boxes - - def __stage1(self, image, scales: list, stage_status: StageStatus): - """ - First stage of the MTCNN. 
- :param image: - :param scales: - :param stage_status: - :return: - """ - total_boxes = np.empty((0, 9)) - status = stage_status - - for scale in scales: - scaled_image = self.__scale_image(image, scale) - - img_x = np.expand_dims(scaled_image, 0) - img_y = np.transpose(img_x, (0, 2, 1, 3)) - - out = self._pnet.predict(img_y) - - out0 = np.transpose(out[0], (0, 2, 1, 3)) - out1 = np.transpose(out[1], (0, 2, 1, 3)) - - boxes, _ = self.__generate_bounding_box(out1[0, :, :, 1].copy(), - out0[0, :, :, :].copy(), scale, self._steps_threshold[0]) - - # inter-scale nms - pick = self.__nms(boxes.copy(), 0.5, 'Union') - if boxes.size > 0 and pick.size > 0: - boxes = boxes[pick, :] - total_boxes = np.append(total_boxes, boxes, axis=0) - - numboxes = total_boxes.shape[0] - - if numboxes > 0: - pick = self.__nms(total_boxes.copy(), 0.7, 'Union') - total_boxes = total_boxes[pick, :] - - regw = total_boxes[:, 2] - total_boxes[:, 0] - regh = total_boxes[:, 3] - total_boxes[:, 1] - - qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw - qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh - qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw - qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh - - total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]])) - total_boxes = self.__rerec(total_boxes.copy()) - - total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32) - status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), - width=stage_status.width, height=stage_status.height) - - return total_boxes, status - - def __stage2(self, img, total_boxes, stage_status: StageStatus): - """ - Second stage of the MTCNN. - :param img: - :param total_boxes: - :param stage_status: - :return: - """ - - num_boxes = total_boxes.shape[0] - if num_boxes == 0: - return total_boxes, stage_status - - # second stage - tempimg = np.zeros(shape=(24, 24, 3, num_boxes)) - - for k in range(0, num_boxes): - tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3)) - - tmp[stage_status.dy[k] - 1:stage_status.edy[k], stage_status.dx[k] - 1:stage_status.edx[k], :] = \ - img[stage_status.y[k] - 1:stage_status.ey[k], stage_status.x[k] - 1:stage_status.ex[k], :] - - if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: - # tempimg[:,:,:, k] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA) - tempimg[:, :, :, k] = np.asarray(Image.fromarray(np.uint8(tmp)).resize((24, 24), Image.BICUBIC)) - - else: - return np.empty(shape=(0,)), stage_status - - tempimg = (tempimg - 127.5) * 0.0078125 - tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) - - out = self._rnet.predict(tempimg1) - - out0 = np.transpose(out[0]) - out1 = np.transpose(out[1]) - - score = out1[1, :] - - ipass = np.where(score > self._steps_threshold[1]) - - total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) - - mv = out0[:, ipass[0]] - - if total_boxes.shape[0] > 0: - pick = self.__nms(total_boxes, 0.7, 'Union') - total_boxes = total_boxes[pick, :] - total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[:, pick])) - total_boxes = self.__rerec(total_boxes.copy()) - - return total_boxes, stage_status - - def __stage3(self, img, total_boxes, stage_status: StageStatus): - """ - Third stage of the MTCNN. 
- - :param img: - :param total_boxes: - :param stage_status: - :return: - """ - num_boxes = total_boxes.shape[0] - if num_boxes == 0: - return total_boxes, np.empty(shape=(0,)) - - total_boxes = np.fix(total_boxes).astype(np.int32) - - status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), - width=stage_status.width, height=stage_status.height) - - tempimg = np.zeros((48, 48, 3, num_boxes)) - - for k in range(0, num_boxes): - - tmp = np.zeros((int(status.tmph[k]), int(status.tmpw[k]), 3)) - - tmp[status.dy[k] - 1:status.edy[k], status.dx[k] - 1:status.edx[k], :] = \ - img[status.y[k] - 1:status.ey[k], status.x[k] - 1:status.ex[k], :] - - if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: - # tempimg[:,:,:, k] = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_AREA) - tempimg[:, :, :, k] = np.asarray(Image.fromarray(np.uint8(tmp)).resize((48, 48), Image.BICUBIC)) - else: - return np.empty(shape=(0,)), np.empty(shape=(0,)) - - tempimg = (tempimg - 127.5) * 0.0078125 - tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) - - out = self._onet.predict(tempimg1) - out0 = np.transpose(out[0]) - out1 = np.transpose(out[1]) - out2 = np.transpose(out[2]) - - score = out2[1, :] - - points = out1 - - ipass = np.where(score > self._steps_threshold[2]) - - points = points[:, ipass[0]] - - total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) - - mv = out0[:, ipass[0]] - - w = total_boxes[:, 2] - total_boxes[:, 0] + 1 - h = total_boxes[:, 3] - total_boxes[:, 1] + 1 - - points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1 - points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1 - - if total_boxes.shape[0] > 0: - total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv)) - pick = self.__nms(total_boxes.copy(), 0.7, 'Min') - total_boxes = total_boxes[pick, :] - points = points[:, pick] - - return total_boxes, points +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# MIT License +# +# Copyright (c) 2019 Iván de Paz Centeno +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# +# This code is derived from the MTCNN implementation of David Sandberg for Facenet +# (https://github.com/davidsandberg/facenet/) +# It has been rebuilt from scratch, taking the David Sandberg's implementation as a reference. 
+# + +# import cv2 +import numpy as np +from PIL import Image +import pkg_resources + +from .exceptions import InvalidImage +from .network.factory import NetworkFactory + +__author__ = "Iván de Paz Centeno" + + +class StageStatus(object): + """ + Keeps status between MTCNN stages + """ + + def __init__(self, pad_result: tuple = None, width=0, height=0): + self.width = width + self.height = height + self.dy = self.edy = self.dx = self.edx = self.y = self.ey = self.x = self.ex = self.tmpw = self.tmph = [] + + if pad_result is not None: + self.update(pad_result) + + def update(self, pad_result: tuple): + s = self + s.dy, s.edy, s.dx, s.edx, s.y, s.ey, s.x, s.ex, s.tmpw, s.tmph = pad_result + + +class MTCNN(object): + """ + Allows to perform MTCNN Detection -> + a) Detection of faces (with the confidence probability) + b) Detection of keypoints (left eye, right eye, nose, mouth_left, mouth_right) + """ + + def __init__(self, weights_file: str = None, min_face_size: int = 20, steps_threshold: list = None, + scale_factor: float = 0.709): + """ + Initializes the MTCNN. + :param weights_file: file uri with the weights of the P, R and O networks from MTCNN. By default it will load + the ones bundled with the package. + :param min_face_size: minimum size of the face to detect + :param steps_threshold: step's thresholds values + :param scale_factor: scale factor + """ + if steps_threshold is None: + steps_threshold = [0.6, 0.7, 0.7] + + if weights_file is None: + weights_file = pkg_resources.resource_stream('mtcnn', 'data/mtcnn_weights.npy') + + self._min_face_size = min_face_size + self._steps_threshold = steps_threshold + self._scale_factor = scale_factor + + self._pnet, self._rnet, self._onet = NetworkFactory().build_P_R_O_nets_from_file(weights_file) + + @property + def min_face_size(self): + return self._min_face_size + + @min_face_size.setter + def min_face_size(self, mfc=20): + try: + self._min_face_size = int(mfc) + except ValueError: + self._min_face_size = 20 + + def __compute_scale_pyramid(self, m, min_layer): + scales = [] + factor_count = 0 + + while min_layer >= 12: + scales += [m * np.power(self._scale_factor, factor_count)] + min_layer = min_layer * self._scale_factor + factor_count += 1 + + return scales + + @staticmethod + def __scale_image(image, scale: float): + """ + Scales the image to a given scale. 
+ :param image: + :param scale: + :return: + """ + height, width, _ = image.shape + + width_scaled = int(np.ceil(width * scale)) + height_scaled = int(np.ceil(height * scale)) + + # im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation = cv2.INTER_AREA) + im_data = Image.fromarray(image).resize((width_scaled, height_scaled), Image.BICUBIC) + im_data = np.asarray(im_data) + + # Normalize the image's pixels + im_data_normalized = (im_data - 127.5) * 0.0078125 + + return im_data_normalized + + @staticmethod + def __generate_bounding_box(imap, reg, scale, t): + + # use heatmap to generate bounding boxes + stride = 2 + cellsize = 12 + + imap = np.transpose(imap) + dx1 = np.transpose(reg[:, :, 0]) + dy1 = np.transpose(reg[:, :, 1]) + dx2 = np.transpose(reg[:, :, 2]) + dy2 = np.transpose(reg[:, :, 3]) + + y, x = np.where(imap >= t) + + if y.shape[0] == 1: + dx1 = np.flipud(dx1) + dy1 = np.flipud(dy1) + dx2 = np.flipud(dx2) + dy2 = np.flipud(dy2) + + score = imap[(y, x)] + reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])) + + if reg.size == 0: + reg = np.empty(shape=(0, 3)) + + bb = np.transpose(np.vstack([y, x])) + + q1 = np.fix((stride * bb + 1) / scale) + q2 = np.fix((stride * bb + cellsize) / scale) + boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg]) + + return boundingbox, reg + + @staticmethod + def __nms(boxes, threshold, method): + """ + Non Maximum Suppression. + + :param boxes: np array with bounding boxes. + :param threshold: + :param method: NMS method to apply. Available values ('Min', 'Union') + :return: + """ + if boxes.size == 0: + return np.empty((0, 3)) + + x1 = boxes[:, 0] + y1 = boxes[:, 1] + x2 = boxes[:, 2] + y2 = boxes[:, 3] + s = boxes[:, 4] + + area = (x2 - x1 + 1) * (y2 - y1 + 1) + sorted_s = np.argsort(s) + + pick = np.zeros_like(s, dtype=np.int16) + counter = 0 + while sorted_s.size > 0: + i = sorted_s[-1] + pick[counter] = i + counter += 1 + idx = sorted_s[0:-1] + + xx1 = np.maximum(x1[i], x1[idx]) + yy1 = np.maximum(y1[i], y1[idx]) + xx2 = np.minimum(x2[i], x2[idx]) + yy2 = np.minimum(y2[i], y2[idx]) + + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + + inter = w * h + + if method == 'Min': + o = inter / np.minimum(area[i], area[idx]) + else: + o = inter / (area[i] + area[idx] - inter) + + sorted_s = sorted_s[np.where(o <= threshold)] + + pick = pick[0:counter] + + return pick + + @staticmethod + def __pad(total_boxes, w, h): + # compute the padding coordinates (pad the bounding boxes to square) + tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32) + tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32) + numbox = total_boxes.shape[0] + + dx = np.ones(numbox, dtype=np.int32) + dy = np.ones(numbox, dtype=np.int32) + edx = tmpw.copy().astype(np.int32) + edy = tmph.copy().astype(np.int32) + + x = total_boxes[:, 0].copy().astype(np.int32) + y = total_boxes[:, 1].copy().astype(np.int32) + ex = total_boxes[:, 2].copy().astype(np.int32) + ey = total_boxes[:, 3].copy().astype(np.int32) + + tmp = np.where(ex > w) + edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1) + ex[tmp] = w + + tmp = np.where(ey > h) + edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1) + ey[tmp] = h + + tmp = np.where(x < 1) + dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1) + x[tmp] = 1 + + tmp = np.where(y < 1) + dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1) + y[tmp] = 1 + + return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph + + @staticmethod + def __rerec(bbox): + # 
convert bbox to square + height = bbox[:, 3] - bbox[:, 1] + width = bbox[:, 2] - bbox[:, 0] + max_side_length = np.maximum(width, height) + bbox[:, 0] = bbox[:, 0] + width * 0.5 - max_side_length * 0.5 + bbox[:, 1] = bbox[:, 1] + height * 0.5 - max_side_length * 0.5 + bbox[:, 2:4] = bbox[:, 0:2] + np.transpose(np.tile(max_side_length, (2, 1))) + return bbox + + @staticmethod + def __bbreg(boundingbox, reg): + # calibrate bounding boxes + if reg.shape[1] == 1: + reg = np.reshape(reg, (reg.shape[2], reg.shape[3])) + + w = boundingbox[:, 2] - boundingbox[:, 0] + 1 + h = boundingbox[:, 3] - boundingbox[:, 1] + 1 + b1 = boundingbox[:, 0] + reg[:, 0] * w + b2 = boundingbox[:, 1] + reg[:, 1] * h + b3 = boundingbox[:, 2] + reg[:, 2] * w + b4 = boundingbox[:, 3] + reg[:, 3] * h + boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4])) + return boundingbox + + def detect_faces(self, img) -> list: + """ + Detects bounding boxes from the specified image. + :param img: image to process + :return: list containing all the bounding boxes detected with their keypoints. + """ + if img is None or not hasattr(img, "shape"): + raise InvalidImage("Image not valid.") + + height, width, _ = img.shape + stage_status = StageStatus(width=width, height=height) + + m = 12 / self._min_face_size + min_layer = np.amin([height, width]) * m + + scales = self.__compute_scale_pyramid(m, min_layer) + + stages = [self.__stage1, self.__stage2, self.__stage3] + result = [scales, stage_status] + + # We pipe here each of the stages + for stage in stages: + result = stage(img, result[0], result[1]) + + [total_boxes, points] = result + + bounding_boxes = [] + + for bounding_box, keypoints in zip(total_boxes, points.T): + x = max(0, int(bounding_box[0])) + y = max(0, int(bounding_box[1])) + width = int(bounding_box[2] - x) + height = int(bounding_box[3] - y) + bounding_boxes.append({ + 'box': [x, y, width, height], + 'confidence': bounding_box[-1], + 'keypoints': { + 'left_eye': (int(keypoints[0]), int(keypoints[5])), + 'right_eye': (int(keypoints[1]), int(keypoints[6])), + 'nose': (int(keypoints[2]), int(keypoints[7])), + 'mouth_left': (int(keypoints[3]), int(keypoints[8])), + 'mouth_right': (int(keypoints[4]), int(keypoints[9])), + } + }) + + return bounding_boxes + + def __stage1(self, image, scales: list, stage_status: StageStatus): + """ + First stage of the MTCNN. 
+ :param image: + :param scales: + :param stage_status: + :return: + """ + total_boxes = np.empty((0, 9)) + status = stage_status + + for scale in scales: + scaled_image = self.__scale_image(image, scale) + + img_x = np.expand_dims(scaled_image, 0) + img_y = np.transpose(img_x, (0, 2, 1, 3)) + + out = self._pnet.predict(img_y) + + out0 = np.transpose(out[0], (0, 2, 1, 3)) + out1 = np.transpose(out[1], (0, 2, 1, 3)) + + boxes, _ = self.__generate_bounding_box(out1[0, :, :, 1].copy(), + out0[0, :, :, :].copy(), scale, self._steps_threshold[0]) + + # inter-scale nms + pick = self.__nms(boxes.copy(), 0.5, 'Union') + if boxes.size > 0 and pick.size > 0: + boxes = boxes[pick, :] + total_boxes = np.append(total_boxes, boxes, axis=0) + + numboxes = total_boxes.shape[0] + + if numboxes > 0: + pick = self.__nms(total_boxes.copy(), 0.7, 'Union') + total_boxes = total_boxes[pick, :] + + regw = total_boxes[:, 2] - total_boxes[:, 0] + regh = total_boxes[:, 3] - total_boxes[:, 1] + + qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw + qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh + qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw + qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh + + total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]])) + total_boxes = self.__rerec(total_boxes.copy()) + + total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32) + status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), + width=stage_status.width, height=stage_status.height) + + return total_boxes, status + + def __stage2(self, img, total_boxes, stage_status: StageStatus): + """ + Second stage of the MTCNN. + :param img: + :param total_boxes: + :param stage_status: + :return: + """ + + num_boxes = total_boxes.shape[0] + if num_boxes == 0: + return total_boxes, stage_status + + # second stage + tempimg = np.zeros(shape=(24, 24, 3, num_boxes)) + + for k in range(0, num_boxes): + tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3)) + + tmp[stage_status.dy[k] - 1:stage_status.edy[k], stage_status.dx[k] - 1:stage_status.edx[k], :] = \ + img[stage_status.y[k] - 1:stage_status.ey[k], stage_status.x[k] - 1:stage_status.ex[k], :] + + if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: + # tempimg[:,:,:, k] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA) + tempimg[:, :, :, k] = np.asarray(Image.fromarray(np.uint8(tmp)).resize((24, 24), Image.BICUBIC)) + + else: + return np.empty(shape=(0,)), stage_status + + tempimg = (tempimg - 127.5) * 0.0078125 + tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) + + out = self._rnet.predict(tempimg1) + + out0 = np.transpose(out[0]) + out1 = np.transpose(out[1]) + + score = out1[1, :] + + ipass = np.where(score > self._steps_threshold[1]) + + total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) + + mv = out0[:, ipass[0]] + + if total_boxes.shape[0] > 0: + pick = self.__nms(total_boxes, 0.7, 'Union') + total_boxes = total_boxes[pick, :] + total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[:, pick])) + total_boxes = self.__rerec(total_boxes.copy()) + + return total_boxes, stage_status + + def __stage3(self, img, total_boxes, stage_status: StageStatus): + """ + Third stage of the MTCNN. 
+ + :param img: + :param total_boxes: + :param stage_status: + :return: + """ + num_boxes = total_boxes.shape[0] + if num_boxes == 0: + return total_boxes, np.empty(shape=(0,)) + + total_boxes = np.fix(total_boxes).astype(np.int32) + + status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), + width=stage_status.width, height=stage_status.height) + + tempimg = np.zeros((48, 48, 3, num_boxes)) + + for k in range(0, num_boxes): + + tmp = np.zeros((int(status.tmph[k]), int(status.tmpw[k]), 3)) + + tmp[status.dy[k] - 1:status.edy[k], status.dx[k] - 1:status.edx[k], :] = \ + img[status.y[k] - 1:status.ey[k], status.x[k] - 1:status.ex[k], :] + + if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: + # tempimg[:,:,:, k] = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_AREA) + tempimg[:, :, :, k] = np.asarray(Image.fromarray(np.uint8(tmp)).resize((48, 48), Image.BICUBIC)) + else: + return np.empty(shape=(0,)), np.empty(shape=(0,)) + + tempimg = (tempimg - 127.5) * 0.0078125 + tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) + + out = self._onet.predict(tempimg1) + out0 = np.transpose(out[0]) + out1 = np.transpose(out[1]) + out2 = np.transpose(out[2]) + + score = out2[1, :] + + points = out1 + + ipass = np.where(score > self._steps_threshold[2]) + + points = points[:, ipass[0]] + + total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) + + mv = out0[:, ipass[0]] + + w = total_boxes[:, 2] - total_boxes[:, 0] + 1 + h = total_boxes[:, 3] - total_boxes[:, 1] + 1 + + points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1 + points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1 + + if total_boxes.shape[0] > 0: + total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv)) + pick = self.__nms(total_boxes.copy(), 0.7, 'Min') + total_boxes = total_boxes[pick, :] + points = points[:, pick] + + return total_boxes, points diff --git a/photonix/classifiers/face_detection/mtcnn/network/__init__.py b/photonix/classifiers/face_detection/mtcnn/network/__init__.py index 48d3830c..368de3d0 100755 --- a/photonix/classifiers/face_detection/mtcnn/network/__init__.py +++ b/photonix/classifiers/face_detection/mtcnn/network/__init__.py @@ -1,24 +1,24 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- - -# MIT License -# -# Copyright (c) 2019 Iván de Paz Centeno -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
+#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# MIT License +# +# Copyright (c) 2019 Iván de Paz Centeno +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. diff --git a/photonix/classifiers/face_detection/mtcnn/network/factory.py b/photonix/classifiers/face_detection/mtcnn/network/factory.py index 27dd4772..0bfbbe96 100755 --- a/photonix/classifiers/face_detection/mtcnn/network/factory.py +++ b/photonix/classifiers/face_detection/mtcnn/network/factory.py @@ -1,131 +1,131 @@ -#!/usr/bin/python3 -# -*- coding: utf-8 -*- - -# MIT License -# -# Copyright (c) 2019 Iván de Paz Centeno -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. 
- -from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, PReLU, Flatten, Softmax -from tensorflow.keras.models import Model - -import numpy as np - - -class NetworkFactory: - - def build_pnet(self, input_shape=None): - if input_shape is None: - input_shape = (None, None, 3) - - p_inp = Input(input_shape) - - p_layer = Conv2D(10, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_inp) - p_layer = PReLU(shared_axes=[1, 2])(p_layer) - p_layer = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same")(p_layer) - - p_layer = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_layer) - p_layer = PReLU(shared_axes=[1, 2])(p_layer) - - p_layer = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_layer) - p_layer = PReLU(shared_axes=[1, 2])(p_layer) - - p_layer_out1 = Conv2D(2, kernel_size=(1, 1), strides=(1, 1))(p_layer) - p_layer_out1 = Softmax(axis=3)(p_layer_out1) - - p_layer_out2 = Conv2D(4, kernel_size=(1, 1), strides=(1, 1))(p_layer) - - p_net = Model(p_inp, [p_layer_out2, p_layer_out1]) - - return p_net - - def build_rnet(self, input_shape=None): - if input_shape is None: - input_shape = (24, 24, 3) - - r_inp = Input(input_shape) - - r_layer = Conv2D(28, kernel_size=(3, 3), strides=(1, 1), padding="valid")(r_inp) - r_layer = PReLU(shared_axes=[1, 2])(r_layer) - r_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(r_layer) - - r_layer = Conv2D(48, kernel_size=(3, 3), strides=(1, 1), padding="valid")(r_layer) - r_layer = PReLU(shared_axes=[1, 2])(r_layer) - r_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(r_layer) - - r_layer = Conv2D(64, kernel_size=(2, 2), strides=(1, 1), padding="valid")(r_layer) - r_layer = PReLU(shared_axes=[1, 2])(r_layer) - r_layer = Flatten()(r_layer) - r_layer = Dense(128)(r_layer) - r_layer = PReLU()(r_layer) - - r_layer_out1 = Dense(2)(r_layer) - r_layer_out1 = Softmax(axis=1)(r_layer_out1) - - r_layer_out2 = Dense(4)(r_layer) - - r_net = Model(r_inp, [r_layer_out2, r_layer_out1]) - - return r_net - - def build_onet(self, input_shape=None): - if input_shape is None: - input_shape = (48, 48, 3) - - o_inp = Input(input_shape) - o_layer = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_inp) - o_layer = PReLU(shared_axes=[1, 2])(o_layer) - o_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(o_layer) - - o_layer = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_layer) - o_layer = PReLU(shared_axes=[1, 2])(o_layer) - o_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(o_layer) - - o_layer = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_layer) - o_layer = PReLU(shared_axes=[1, 2])(o_layer) - o_layer = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same")(o_layer) - - o_layer = Conv2D(128, kernel_size=(2, 2), strides=(1, 1), padding="valid")(o_layer) - o_layer = PReLU(shared_axes=[1, 2])(o_layer) - - o_layer = Flatten()(o_layer) - o_layer = Dense(256)(o_layer) - o_layer = PReLU()(o_layer) - - o_layer_out1 = Dense(2)(o_layer) - o_layer_out1 = Softmax(axis=1)(o_layer_out1) - o_layer_out2 = Dense(4)(o_layer) - o_layer_out3 = Dense(10)(o_layer) - - o_net = Model(o_inp, [o_layer_out2, o_layer_out3, o_layer_out1]) - return o_net - - def build_P_R_O_nets_from_file(self, weights_file): - weights = np.load(weights_file, allow_pickle=True).tolist() - - p_net = self.build_pnet() - r_net = self.build_rnet() - o_net = self.build_onet() - - p_net.set_weights(weights['pnet']) - 
r_net.set_weights(weights['rnet']) - o_net.set_weights(weights['onet']) - - return p_net, r_net, o_net +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# MIT License +# +# Copyright (c) 2019 Iván de Paz Centeno +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, PReLU, Flatten, Softmax +from tensorflow.keras.models import Model + +import numpy as np + + +class NetworkFactory: + + def build_pnet(self, input_shape=None): + if input_shape is None: + input_shape = (None, None, 3) + + p_inp = Input(input_shape) + + p_layer = Conv2D(10, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_inp) + p_layer = PReLU(shared_axes=[1, 2])(p_layer) + p_layer = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same")(p_layer) + + p_layer = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_layer) + p_layer = PReLU(shared_axes=[1, 2])(p_layer) + + p_layer = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_layer) + p_layer = PReLU(shared_axes=[1, 2])(p_layer) + + p_layer_out1 = Conv2D(2, kernel_size=(1, 1), strides=(1, 1))(p_layer) + p_layer_out1 = Softmax(axis=3)(p_layer_out1) + + p_layer_out2 = Conv2D(4, kernel_size=(1, 1), strides=(1, 1))(p_layer) + + p_net = Model(p_inp, [p_layer_out2, p_layer_out1]) + + return p_net + + def build_rnet(self, input_shape=None): + if input_shape is None: + input_shape = (24, 24, 3) + + r_inp = Input(input_shape) + + r_layer = Conv2D(28, kernel_size=(3, 3), strides=(1, 1), padding="valid")(r_inp) + r_layer = PReLU(shared_axes=[1, 2])(r_layer) + r_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(r_layer) + + r_layer = Conv2D(48, kernel_size=(3, 3), strides=(1, 1), padding="valid")(r_layer) + r_layer = PReLU(shared_axes=[1, 2])(r_layer) + r_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(r_layer) + + r_layer = Conv2D(64, kernel_size=(2, 2), strides=(1, 1), padding="valid")(r_layer) + r_layer = PReLU(shared_axes=[1, 2])(r_layer) + r_layer = Flatten()(r_layer) + r_layer = Dense(128)(r_layer) + r_layer = PReLU()(r_layer) + + r_layer_out1 = Dense(2)(r_layer) + r_layer_out1 = Softmax(axis=1)(r_layer_out1) + + r_layer_out2 = Dense(4)(r_layer) + + r_net = Model(r_inp, [r_layer_out2, r_layer_out1]) + + return r_net + + def build_onet(self, input_shape=None): + if input_shape is None: + input_shape = (48, 48, 3) + + o_inp = Input(input_shape) + o_layer = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_inp) 
+ o_layer = PReLU(shared_axes=[1, 2])(o_layer) + o_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(o_layer) + + o_layer = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_layer) + o_layer = PReLU(shared_axes=[1, 2])(o_layer) + o_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(o_layer) + + o_layer = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_layer) + o_layer = PReLU(shared_axes=[1, 2])(o_layer) + o_layer = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same")(o_layer) + + o_layer = Conv2D(128, kernel_size=(2, 2), strides=(1, 1), padding="valid")(o_layer) + o_layer = PReLU(shared_axes=[1, 2])(o_layer) + + o_layer = Flatten()(o_layer) + o_layer = Dense(256)(o_layer) + o_layer = PReLU()(o_layer) + + o_layer_out1 = Dense(2)(o_layer) + o_layer_out1 = Softmax(axis=1)(o_layer_out1) + o_layer_out2 = Dense(4)(o_layer) + o_layer_out3 = Dense(10)(o_layer) + + o_net = Model(o_inp, [o_layer_out2, o_layer_out3, o_layer_out1]) + return o_net + + def build_P_R_O_nets_from_file(self, weights_file): + weights = np.load(weights_file, allow_pickle=True).tolist() + + p_net = self.build_pnet() + r_net = self.build_rnet() + o_net = self.build_onet() + + p_net.set_weights(weights['pnet']) + r_net.set_weights(weights['rnet']) + o_net.set_weights(weights['onet']) + + return p_net, r_net, o_net From d0be1a2f904b64cced785d26b187288e38ea4ebf Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Thu, 27 May 2021 15:15:42 +0100 Subject: [PATCH 027/110] People section in metadata panel --- ui/src/components/PhotoMetadata.js | 37 ++++++++++++++++++------------ 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/ui/src/components/PhotoMetadata.js b/ui/src/components/PhotoMetadata.js index c549011f..41ad8040 100644 --- a/ui/src/components/PhotoMetadata.js +++ b/ui/src/components/PhotoMetadata.js @@ -275,7 +275,7 @@ const PhotoMetadata = ({ )}
- {photo.locationTags.length ? ( + {photo.locationTags.length && (

Locations

- ) : ( - '' )} - {photo.location ? ( + {photo.location && (

Map

{}
- ) : ( - '' )} - {photo.colorTags.length ? ( + {photo.colorTags.length && (

Colors

- ) : ( - '' )} - {photo.objectTags.length ? ( + {photo.personTags.length && ( +
+

+ People + {showBoundingBox ? ( + setShowBoundingBox(false)} /> + ) : ( + setShowBoundingBox(true)} /> + )} +

+
    + {photo.personTags.map((photoTag, index) => ( +
+                <li key={index}>{photoTag.tag.name}</li>
+              ))}
+
+
+ )} + {photo.objectTags.length && (

Objects @@ -328,10 +339,8 @@ const PhotoMetadata = ({ ))}

- ) : ( - '' )} - {photo.styleTags.length ? ( + {photo.styleTags.length && (

Styles

    @@ -340,8 +349,6 @@ const PhotoMetadata = ({ ))}
- ) : ( - '' )}

From 0a13f7b3f3edfeb856784c1efeb60a8dcb13d430 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Fri, 28 May 2021 11:22:22 +0100 Subject: [PATCH 028/110] Face matching basically working --- docker/Dockerfile.dev | 1 + photonix/classifiers/face_detection/model.py | 78 ++++++++++++++++++- .../classifiers/face_detection/mtcnn/mtcnn.py | 2 +- ...classification_face_detection_processor.py | 2 +- requirements.txt | 1 + 5 files changed, 78 insertions(+), 6 deletions(-) diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev index bce3561d..eac2290e 100644 --- a/docker/Dockerfile.dev +++ b/docker/Dockerfile.dev @@ -14,6 +14,7 @@ RUN apt-get update && \ libblas3 \ libfreetype6 \ libfreetype6-dev \ + libgl1 \ libhdf5-dev \ libimage-exiftool-perl \ libjpeg-dev \ diff --git a/photonix/classifiers/face_detection/model.py b/photonix/classifiers/face_detection/model.py index 2006cba9..5d06caf9 100644 --- a/photonix/classifiers/face_detection/model.py +++ b/photonix/classifiers/face_detection/model.py @@ -1,15 +1,19 @@ +import json import operator import os import sys from pathlib import Path +from random import randint +from deepface import DeepFace import numpy as np from PIL import Image import redis from redis_lock import Lock from photonix.classifiers.base_model import BaseModel -from .mtcnn import MTCNN +from photonix.classifiers.face_detection.mtcnn import MTCNN +from photonix.photos.models import Tag, PhotoTag GRAPH_FILE = os.path.join('face_detection', 'mtcnn_weights.npy') @@ -18,7 +22,7 @@ class FaceDetectionModel(BaseModel): name = 'face_detection' version = 20210120 approx_ram_mb = 1000 - max_num_workers = 2 + max_num_workers = 1 def __init__(self, model_dir=None, graph_file=GRAPH_FILE, lock_name=None): super().__init__(model_dir=model_dir) @@ -46,24 +50,90 @@ def predict(self, image_file, min_score=0.99): return list(filter(lambda f: f['confidence'] > min_score, results)) +def calculate_euclidian_distance(source_representation, test_representation): + euclidean_distance = np.array(source_representation) - np.array(test_representation) + euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance)) + euclidean_distance = np.sqrt(euclidean_distance) + return euclidean_distance + + +def find_closest_face_tag(library_id, source_embedding): + # Collect all previously generated embeddings + representations = [] + for photo_tag in PhotoTag.objects.filter(photo__library_id=library_id, tag__type='F'): + try: + tag_embedding = json.loads(photo_tag.extra_data)['facenet_embedding'] + representations.append((str(photo_tag.tag.id), tag_embedding)) + except KeyError: + pass + + # Calculate Euclidean distances + distances = [] + for (_, target_embedding) in representations: + distance = calculate_euclidian_distance(source_embedding, target_embedding) + distances.append(distance) + + # Return closest match and distance value + if not distances: # First face has nothing to compare to + return (None, 999) + candidate_idx = np.argmin(distances) + return (representations[candidate_idx][0], distance) + + def run_on_photo(photo_id): model = FaceDetectionModel() sys.path.insert(0, str(Path(__file__).resolve().parent.parent)) from photonix.classifiers.runners import results_for_model_on_photo, get_or_create_tag + # Detect all faces in an image photo, results = results_for_model_on_photo(model, photo_id) + # Read image data so we can extract faces and create embeddings + path = photo_id + if photo: + path = photo.base_image_path + image_data = Image.open(path) + + # Loop over each face that was 
detected above + for result in results: + # Crop individual face + 30% extra in each direction + box = result['box'] + face_image = image_data.crop([box[0]-int(box[2]*0.3), box[1]-int(box[3]*0.3), box[0]+box[2]+int(box[2]*0.3), box[1]+box[3]+int(box[3]*0.3)]) + # Generate embedding with Facenet + try: + embedding = DeepFace.represent(np.asarray(face_image), model_name='Facenet') + # Add it to the results + result['embedding'] = embedding + if photo: + closest_tag, closest_distance = find_closest_face_tag(photo.library, embedding) + if closest_tag: + print(f'Closest tag: {closest_tag}') + print(f'Closest distance: {closest_distance}') + result['closest_tag'] = closest_tag + result['closest_distance'] = closest_distance + except ValueError: + pass + if photo: from django.utils import timezone from photonix.photos.models import PhotoTag photo.clear_tags(source='C', type='F') for result in results: - tag = get_or_create_tag(library=photo.library, name='Unknown face', type='F', source='C') + if result.get('closest_distance', 999) < 14: + tag = Tag.objects.get(id=result['closest_tag'], library=photo.library, type='F') + print(f'MATCHED {tag.name}') + else: + tag = get_or_create_tag(library=photo.library, name=f'Unknown face {randint(1,1000000)}', type='F', source='C') x = (result['box'][0] + (result['box'][2] / 2)) / photo.base_file.width y = (result['box'][1] + (result['box'][3] / 2)) / photo.base_file.height width = result['box'][2] / photo.base_file.width height = result['box'][3] / photo.base_file.height score = result['confidence'] - PhotoTag(photo=photo, tag=tag, source='F', confidence=score, significance=score, position_x=x, position_y=y, size_x=width, size_y=height).save() + + extra_data = '' + if 'embedding' in result: + extra_data = json.dumps({'facenet_embedding': result['embedding']}) + + PhotoTag(photo=photo, tag=tag, source='F', confidence=score, significance=score, position_x=x, position_y=y, size_x=width, size_y=height, extra_data=extra_data).save() photo.classifier_color_completed_at = timezone.now() photo.classifier_color_version = getattr(model, 'version', 0) photo.save() diff --git a/photonix/classifiers/face_detection/mtcnn/mtcnn.py b/photonix/classifiers/face_detection/mtcnn/mtcnn.py index b1f82311..b18a2e85 100644 --- a/photonix/classifiers/face_detection/mtcnn/mtcnn.py +++ b/photonix/classifiers/face_detection/mtcnn/mtcnn.py @@ -79,7 +79,7 @@ def __init__(self, weights_file: str = None, min_face_size: int = 20, steps_thre steps_threshold = [0.6, 0.7, 0.7] if weights_file is None: - weights_file = pkg_resources.resource_stream('mtcnn', 'data/mtcnn_weights.npy') + weights_file = '/data/models/face_detection/mtcnn_weights.npy' self._min_face_size = min_face_size self._steps_threshold = steps_threshold diff --git a/photonix/photos/management/commands/classification_face_detection_processor.py b/photonix/photos/management/commands/classification_face_detection_processor.py index 70384bb8..4cebd0f9 100644 --- a/photonix/photos/management/commands/classification_face_detection_processor.py +++ b/photonix/photos/management/commands/classification_face_detection_processor.py @@ -13,7 +13,7 @@ class Command(BaseCommand): help = 'Runs the workers with the face detection model.' 
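# --- Sketch (not part of the patch): the face-matching flow added above in
# model.py. Each saved PhotoTag stores its 128-dimensional Facenet embedding
# as JSON in extra_data (hence the (KeyError, JSONDecodeError) guard added in
# the follow-up patch). A new face adopts the tag whose stored embedding has
# the smallest Euclidean distance d(a, b) = sqrt(sum_i (a_i - b_i)^2), and the
# caller accepts the match when d < 14. Note that np.argmin yields the index
# of the minimum, so the distance belonging to the returned tag is
# distances[candidate_idx], not the value left in the loop variable.
import numpy as np

def closest_tag_sketch(representations, source_embedding, threshold=14):
    # representations: list of (tag_id, embedding) pairs already in the library
    if not representations:
        return None, 999  # the first face has nothing to compare against
    distances = [np.linalg.norm(np.asarray(emb) - np.asarray(source_embedding))
                 for _, emb in representations]
    idx = int(np.argmin(distances))
    distance = float(distances[idx])
    if distance < threshold:
        return representations[idx][0], distance
    return None, distance

# The crop fed to DeepFace.represent adds a 30% margin around the MTCNN box
# (x, y, w, h); the later "Prevents face cropping outside the original image"
# patch clamps it to the image bounds, equivalent to:
def crop_with_margin(image, box, margin=0.3):
    x, y, w, h = box
    return image.crop((
        max(x - int(w * margin), 0),
        max(y - int(h * margin), 0),
        min(x + w + int(w * margin), image.width),
        min(y + h + int(h * margin), image.height),
    ))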
def run_processors(self): - num_workers = 4 + num_workers = 1 batch_size = 64 threaded_queue_processor = ThreadedQueueProcessor(model, 'classify.face_detection', run_on_photo, num_workers, batch_size) threaded_queue_processor.run() diff --git a/requirements.txt b/requirements.txt index c61054fe..f1001c7b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,6 +2,7 @@ numpy==1.19.2 scipy==1.4.1 matplotlib==3.1.2 tensorflow==2.4.1 +deepface==0.0.51 Django==3.0.14 django-cors-headers==3.2.1 From cb523140579805c8b8b41532884d0cb75654ed5b Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Fri, 28 May 2021 11:50:11 +0100 Subject: [PATCH 029/110] More robust embedding loading --- photonix/classifiers/face_detection/model.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/photonix/classifiers/face_detection/model.py b/photonix/classifiers/face_detection/model.py index 5d06caf9..2f0ef6e6 100644 --- a/photonix/classifiers/face_detection/model.py +++ b/photonix/classifiers/face_detection/model.py @@ -64,7 +64,7 @@ def find_closest_face_tag(library_id, source_embedding): try: tag_embedding = json.loads(photo_tag.extra_data)['facenet_embedding'] representations.append((str(photo_tag.tag.id), tag_embedding)) - except KeyError: + except (KeyError, json.decoder.JSONDecodeError): pass # Calculate Euclidean distances @@ -138,6 +138,8 @@ def run_on_photo(photo_id): photo.classifier_color_version = getattr(model, 'version', 0) photo.save() + print('Finished') + return photo, results From 12051ebb9f517ffb02bcbba941456ae6c08c9206 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Fri, 28 May 2021 12:37:44 +0100 Subject: [PATCH 030/110] Prevents face cropping outside the original image --- photonix/classifiers/face_detection/model.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/photonix/classifiers/face_detection/model.py b/photonix/classifiers/face_detection/model.py index 2f0ef6e6..f3af1c1f 100644 --- a/photonix/classifiers/face_detection/model.py +++ b/photonix/classifiers/face_detection/model.py @@ -97,7 +97,12 @@ def run_on_photo(photo_id): for result in results: # Crop individual face + 30% extra in each direction box = result['box'] - face_image = image_data.crop([box[0]-int(box[2]*0.3), box[1]-int(box[3]*0.3), box[0]+box[2]+int(box[2]*0.3), box[1]+box[3]+int(box[3]*0.3)]) + face_image = image_data.crop([ + max(box[0]-int(box[2]*0.3), 0), + max(box[1]-int(box[3]*0.3), 0), + min(box[0]+box[2]+int(box[2]*0.3), image_data.width), + min(box[1]+box[3]+int(box[3]*0.3), image_data.height) + ]) # Generate embedding with Facenet try: embedding = DeepFace.represent(np.asarray(face_image), model_name='Facenet') From 3f49fe4c67852169ec3bd0cad7dbbdbb81c76f2b Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Fri, 28 May 2021 15:46:34 +0100 Subject: [PATCH 031/110] Fixes metadata display --- ui/src/components/PhotoMetadata.js | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/ui/src/components/PhotoMetadata.js b/ui/src/components/PhotoMetadata.js index 41ad8040..43431cae 100644 --- a/ui/src/components/PhotoMetadata.js +++ b/ui/src/components/PhotoMetadata.js @@ -275,7 +275,7 @@ const PhotoMetadata = ({ )}

- {photo.locationTags.length && ( + {photo.locationTags.length > 0 && (

Locations

)} - {photo.colorTags.length && ( + {photo.colorTags.length > 0 && (

Colors

)} - {photo.personTags.length && ( + {photo.personTags.length > 0 && (

People @@ -323,7 +323,7 @@ const PhotoMetadata = ({

)} - {photo.objectTags.length && ( + {photo.objectTags.length > 0 && (

Objects @@ -340,7 +340,7 @@ const PhotoMetadata = ({

)} - {photo.styleTags.length && ( + {photo.styleTags.length > 0 && (

Styles

    From 69449e3a5f27b30786947d87d09abd3d24cfef8f Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Sat, 29 May 2021 12:05:39 +0100 Subject: [PATCH 032/110] Included deepface and stripped it down, only extra dependency is now opencv --- .../{face_detection => face}/__init__.py | 0 .../classifiers/face/deepface/DeepFace.py | 103 ++++ .../classifiers/face/deepface/__init__.py | 0 .../face/deepface/basemodels/Facenet.py | 542 ++++++++++++++++++ .../face/deepface/basemodels/__init__.py | 0 .../face/deepface/commons/__init__.py | 0 .../face/deepface/commons/distance.py | 43 ++ .../face/deepface/commons/functions.py | 243 ++++++++ .../face/deepface/models/__init__.py | 0 .../{face_detection => face}/model.py | 26 +- .../mtcnn/__init__.py | 0 .../mtcnn/exceptions/__init__.py | 0 .../mtcnn/exceptions/invalid_image.py | 0 .../mtcnn/layer_factory.py | 0 .../{face_detection => face}/mtcnn/mtcnn.py | 2 +- .../{face_detection => face}/mtcnn/network.py | 0 .../mtcnn/network/__init__.py | 0 .../mtcnn/network/factory.py | 0 ...classification_face_detection_processor.py | 4 +- photonix/photos/utils/classification.py | 2 +- requirements.txt | 2 +- 21 files changed, 946 insertions(+), 21 deletions(-) rename photonix/classifiers/{face_detection => face}/__init__.py (100%) create mode 100644 photonix/classifiers/face/deepface/DeepFace.py create mode 100644 photonix/classifiers/face/deepface/__init__.py create mode 100644 photonix/classifiers/face/deepface/basemodels/Facenet.py create mode 100644 photonix/classifiers/face/deepface/basemodels/__init__.py create mode 100644 photonix/classifiers/face/deepface/commons/__init__.py create mode 100644 photonix/classifiers/face/deepface/commons/distance.py create mode 100644 photonix/classifiers/face/deepface/commons/functions.py create mode 100644 photonix/classifiers/face/deepface/models/__init__.py rename photonix/classifiers/{face_detection => face}/model.py (87%) rename photonix/classifiers/{face_detection => face}/mtcnn/__init__.py (100%) rename photonix/classifiers/{face_detection => face}/mtcnn/exceptions/__init__.py (100%) rename photonix/classifiers/{face_detection => face}/mtcnn/exceptions/invalid_image.py (100%) rename photonix/classifiers/{face_detection => face}/mtcnn/layer_factory.py (100%) rename photonix/classifiers/{face_detection => face}/mtcnn/mtcnn.py (99%) rename photonix/classifiers/{face_detection => face}/mtcnn/network.py (100%) rename photonix/classifiers/{face_detection => face}/mtcnn/network/__init__.py (100%) rename photonix/classifiers/{face_detection => face}/mtcnn/network/factory.py (100%) diff --git a/photonix/classifiers/face_detection/__init__.py b/photonix/classifiers/face/__init__.py similarity index 100% rename from photonix/classifiers/face_detection/__init__.py rename to photonix/classifiers/face/__init__.py diff --git a/photonix/classifiers/face/deepface/DeepFace.py b/photonix/classifiers/face/deepface/DeepFace.py new file mode 100644 index 00000000..0129b514 --- /dev/null +++ b/photonix/classifiers/face/deepface/DeepFace.py @@ -0,0 +1,103 @@ +import warnings +warnings.filterwarnings("ignore") + +import os +#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' + +from photonix.classifiers.face.deepface.basemodels import Facenet +from photonix.classifiers.face.deepface.commons import functions, distance as dst + +import tensorflow as tf +tf_version = int(tf.__version__.split(".")[0]) +if tf_version == 2: + import logging + tf.get_logger().setLevel(logging.ERROR) + +def build_model(model_name): + + 
""" + This function builds a deepface model + Parameters: + model_name (string): face recognition or facial attribute model + VGG-Face, Facenet, OpenFace, DeepFace, DeepID for face recognition + Age, Gender, Emotion, Race for facial attributes + + Returns: + built deepface model + """ + + models = { + 'Facenet': Facenet.loadModel, + } + + model = models.get(model_name) + + if model: + model = model() + #print('Using {} model backend'.format(model_name)) + return model + else: + raise ValueError('Invalid model_name passed - {}'.format(model_name)) + + +def represent(img_path, model_name = 'VGG-Face', model = None, enforce_detection = True, detector_backend = 'mtcnn'): + + """ + This function represents facial images as vectors. + + Parameters: + img_path: exact image path, numpy array or based64 encoded images could be passed. + + model_name (string): VGG-Face, Facenet, OpenFace, DeepFace, DeepID, Dlib, ArcFace. + + model: Built deepface model. A face recognition model is built every call of verify function. You can pass pre-built face recognition model optionally if you will call verify function several times. Consider to pass model if you are going to call represent function in a for loop. + + model = DeepFace.build_model('VGG-Face') + + enforce_detection (boolean): If any face could not be detected in an image, then verify function will return exception. Set this to False not to have this exception. This might be convenient for low resolution images. + + detector_backend (string): set face detector backend as mtcnn, opencv, ssd or dlib + + Returns: + Represent function returns a multidimensional vector. The number of dimensions is changing based on the reference model. E.g. FaceNet returns 128 dimensional vector; VGG-Face returns 2622 dimensional vector. 
+ """ + + if model is None: + model = build_model(model_name) + + #--------------------------------- + + #decide input shape + input_shape = input_shape_x, input_shape_y= functions.find_input_shape(model) + + #detect and align + img = functions.preprocess_face(img = img_path + , target_size=(input_shape_y, input_shape_x) + , enforce_detection = enforce_detection + , detector_backend = detector_backend) + + #represent + embedding = model.predict(img)[0].tolist() + + return embedding + + +def detectFace(img_path, detector_backend = 'mtcnn'): + + """ + This function applies pre-processing stages of a face recognition pipeline including detection and alignment + + Parameters: + img_path: exact image path, numpy array or base64 encoded image + + detector_backend (string): face detection backends are mtcnn, opencv, ssd or dlib + + Returns: + deteced and aligned face in numpy format + """ + + functions.initialize_detector(detector_backend = detector_backend) + + img = functions.preprocess_face(img = img_path, detector_backend = detector_backend)[0] #preprocess_face returns (1, 224, 224, 3) + return img[:, :, ::-1] #bgr to rgb diff --git a/photonix/classifiers/face/deepface/__init__.py b/photonix/classifiers/face/deepface/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/photonix/classifiers/face/deepface/basemodels/Facenet.py b/photonix/classifiers/face/deepface/basemodels/Facenet.py new file mode 100644 index 00000000..e81ec222 --- /dev/null +++ b/photonix/classifiers/face/deepface/basemodels/Facenet.py @@ -0,0 +1,542 @@ +import os + +from tensorflow.keras.models import Model +from tensorflow.keras.layers import Activation +from tensorflow.keras.layers import BatchNormalization +from tensorflow.keras.layers import Concatenate +from tensorflow.keras.layers import Conv2D +from tensorflow.keras.layers import Dense +from tensorflow.keras.layers import Dropout +from tensorflow.keras.layers import GlobalAveragePooling2D +from tensorflow.keras.layers import Input +from tensorflow.keras.layers import Lambda +from tensorflow.keras.layers import MaxPooling2D +from tensorflow.keras.layers import add +from tensorflow.keras import backend as K + + +def scaling(x, scale): + return x * scale + + +def InceptionResNetV2(): + + inputs = Input(shape=(160, 160, 3)) + x = Conv2D(32, 3, strides=2, padding='valid', use_bias=False, name= 'Conv2d_1a_3x3') (inputs) + x = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_1a_3x3_BatchNorm')(x) + x = Activation('relu', name='Conv2d_1a_3x3_Activation')(x) + x = Conv2D(32, 3, strides=1, padding='valid', use_bias=False, name= 'Conv2d_2a_3x3') (x) + x = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_2a_3x3_BatchNorm')(x) + x = Activation('relu', name='Conv2d_2a_3x3_Activation')(x) + x = Conv2D(64, 3, strides=1, padding='same', use_bias=False, name= 'Conv2d_2b_3x3') (x) + x = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_2b_3x3_BatchNorm')(x) + x = Activation('relu', name='Conv2d_2b_3x3_Activation')(x) + x = MaxPooling2D(3, strides=2, name='MaxPool_3a_3x3')(x) + x = Conv2D(80, 1, strides=1, padding='valid', use_bias=False, name= 'Conv2d_3b_1x1') (x) + x = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_3b_1x1_BatchNorm')(x) + x = Activation('relu', name='Conv2d_3b_1x1_Activation')(x) + x = Conv2D(192, 3, strides=1, padding='valid', use_bias=False, name= 'Conv2d_4a_3x3') (x) + x = BatchNormalization(axis=3, 
momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_4a_3x3_BatchNorm')(x) + x = Activation('relu', name='Conv2d_4a_3x3_Activation')(x) + x = Conv2D(256, 3, strides=2, padding='valid', use_bias=False, name= 'Conv2d_4b_3x3') (x) + x = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_4b_3x3_BatchNorm')(x) + x = Activation('relu', name='Conv2d_4b_3x3_Activation')(x) + + # 5x Block35 (Inception-ResNet-A block): + branch_0 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block35_1_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_1_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_1_Conv2d_0b_3x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_1_Branch_1_Conv2d_0b_3x3_Activation')(branch_1) + branch_2 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_2_Conv2d_0a_1x1') (x) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_1_Branch_2_Conv2d_0a_1x1_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_2_Conv2d_0b_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_1_Branch_2_Conv2d_0b_3x3_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_2_Conv2d_0c_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_2_Conv2d_0c_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_1_Branch_2_Conv2d_0c_3x3_Activation')(branch_2) + branches = [branch_0, branch_1, branch_2] + mixed = Concatenate(axis=3, name='Block35_1_Concatenate')(branches) + up = Conv2D(256, 1, strides=1, padding='same', use_bias=True, name= 'Block35_1_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.17})(up) + x = add([x, up]) + x = Activation('relu', name='Block35_1_Activation')(x) + + branch_0 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_2_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block35_2_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, 
name='Block35_2_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_2_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_1_Conv2d_0b_3x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_2_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_2_Branch_1_Conv2d_0b_3x3_Activation')(branch_1) + branch_2 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_2_Conv2d_0a_1x1') (x) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_2_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_2_Branch_2_Conv2d_0a_1x1_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_2_Conv2d_0b_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_2_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_2_Branch_2_Conv2d_0b_3x3_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_2_Conv2d_0c_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_2_Branch_2_Conv2d_0c_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_2_Branch_2_Conv2d_0c_3x3_Activation')(branch_2) + branches = [branch_0, branch_1, branch_2] + mixed = Concatenate(axis=3, name='Block35_2_Concatenate')(branches) + up = Conv2D(256, 1, strides=1, padding='same', use_bias=True, name= 'Block35_2_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.17})(up) + x = add([x, up]) + x = Activation('relu', name='Block35_2_Activation')(x) + + branch_0 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_3_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block35_3_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_3_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_3_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_1_Conv2d_0b_3x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_3_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_3_Branch_1_Conv2d_0b_3x3_Activation')(branch_1) + branch_2 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_2_Conv2d_0a_1x1') (x) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_3_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_3_Branch_2_Conv2d_0a_1x1_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_2_Conv2d_0b_3x3') (branch_2) + branch_2 = 
BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_3_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_3_Branch_2_Conv2d_0b_3x3_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_2_Conv2d_0c_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_3_Branch_2_Conv2d_0c_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_3_Branch_2_Conv2d_0c_3x3_Activation')(branch_2) + branches = [branch_0, branch_1, branch_2] + mixed = Concatenate(axis=3, name='Block35_3_Concatenate')(branches) + up = Conv2D(256, 1, strides=1, padding='same', use_bias=True, name= 'Block35_3_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.17})(up) + x = add([x, up]) + x = Activation('relu', name='Block35_3_Activation')(x) + + branch_0 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block35_4_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_4_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_1_Conv2d_0b_3x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_4_Branch_1_Conv2d_0b_3x3_Activation')(branch_1) + branch_2 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_2_Conv2d_0a_1x1') (x) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_4_Branch_2_Conv2d_0a_1x1_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_2_Conv2d_0b_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_4_Branch_2_Conv2d_0b_3x3_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_2_Conv2d_0c_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_2_Conv2d_0c_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_4_Branch_2_Conv2d_0c_3x3_Activation')(branch_2) + branches = [branch_0, branch_1, branch_2] + mixed = Concatenate(axis=3, name='Block35_4_Concatenate')(branches) + up = Conv2D(256, 1, strides=1, padding='same', use_bias=True, name= 'Block35_4_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.17})(up) + x = add([x, up]) + x = Activation('relu', name='Block35_4_Activation')(x) + + branch_0 = Conv2D(32, 1, strides=1, padding='same', 
use_bias=False, name= 'Block35_5_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block35_5_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_5_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_5_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_5_Branch_1_Conv2d_0b_3x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_5_Branch_1_Conv2d_0b_3x3_Activation')(branch_1) + branch_2 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_5_Branch_2_Conv2d_0a_1x1') (x) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_5_Branch_2_Conv2d_0a_1x1_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_5_Branch_2_Conv2d_0b_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_5_Branch_2_Conv2d_0b_3x3_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_5_Branch_2_Conv2d_0c_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_2_Conv2d_0c_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_5_Branch_2_Conv2d_0c_3x3_Activation')(branch_2) + branches = [branch_0, branch_1, branch_2] + mixed = Concatenate(axis=3, name='Block35_5_Concatenate')(branches) + up = Conv2D(256, 1, strides=1, padding='same', use_bias=True, name= 'Block35_5_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.17})(up) + x = add([x, up]) + x = Activation('relu', name='Block35_5_Activation')(x) + + # Mixed 6a (Reduction-A block): + branch_0 = Conv2D(384, 3, strides=2, padding='valid', use_bias=False, name= 'Mixed_6a_Branch_0_Conv2d_1a_3x3') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_6a_Branch_0_Conv2d_1a_3x3_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Mixed_6a_Branch_0_Conv2d_1a_3x3_Activation')(branch_0) + branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Mixed_6a_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_6a_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Mixed_6a_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(192, 3, strides=1, padding='same', use_bias=False, name= 'Mixed_6a_Branch_1_Conv2d_0b_3x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_6a_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', 
name='Mixed_6a_Branch_1_Conv2d_0b_3x3_Activation')(branch_1) + branch_1 = Conv2D(256, 3, strides=2, padding='valid', use_bias=False, name= 'Mixed_6a_Branch_1_Conv2d_1a_3x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_6a_Branch_1_Conv2d_1a_3x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Mixed_6a_Branch_1_Conv2d_1a_3x3_Activation')(branch_1) + branch_pool = MaxPooling2D(3, strides=2, padding='valid', name='Mixed_6a_Branch_2_MaxPool_1a_3x3')(x) + branches = [branch_0, branch_1, branch_pool] + x = Concatenate(axis=3, name='Mixed_6a')(branches) + + # 10x Block17 (Inception-ResNet-B block): + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_1_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_1_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_1_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_1_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_1_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_1_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_1_Branch_1_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_1_Branch_1_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_1_Branch_1_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_1_Branch_1_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_1_Branch_1_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_1_Branch_1_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_1_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_1_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_1_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_2_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_2_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_2_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_2_Branch_2_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_2_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_2_Branch_2_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_2_Branch_2_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_2_Branch_2_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_2_Branch_2_Conv2d_0b_1x7_Activation')(branch_1) + 
branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_2_Branch_2_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_2_Branch_2_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_2_Branch_2_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_2_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_2_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_2_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_3_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_3_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_3_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_3_Branch_3_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_3_Branch_3_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_3_Branch_3_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_3_Branch_3_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_3_Branch_3_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_3_Branch_3_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_3_Branch_3_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_3_Branch_3_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_3_Branch_3_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_3_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_3_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_3_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_4_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_4_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_4_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_4_Branch_4_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_4_Branch_4_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_4_Branch_4_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_4_Branch_4_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_4_Branch_4_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = 
Activation('relu', name='Block17_4_Branch_4_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_4_Branch_4_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_4_Branch_4_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_4_Branch_4_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_4_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_4_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_4_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_5_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_5_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_5_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_5_Branch_5_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_5_Branch_5_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_5_Branch_5_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_5_Branch_5_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_5_Branch_5_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_5_Branch_5_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_5_Branch_5_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_5_Branch_5_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_5_Branch_5_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_5_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_5_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_5_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_6_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_6_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_6_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_6_Branch_6_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_6_Branch_6_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_6_Branch_6_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_6_Branch_6_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, 
name='Block17_6_Branch_6_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_6_Branch_6_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_6_Branch_6_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_6_Branch_6_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_6_Branch_6_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_6_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_6_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_6_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_7_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_7_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_7_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_7_Branch_7_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_7_Branch_7_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_7_Branch_7_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_7_Branch_7_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_7_Branch_7_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_7_Branch_7_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_7_Branch_7_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_7_Branch_7_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_7_Branch_7_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_7_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_7_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_7_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_8_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_8_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_8_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_8_Branch_8_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_8_Branch_8_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_8_Branch_8_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_8_Branch_8_Conv2d_0b_1x7') (branch_1) + branch_1 = 
BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_8_Branch_8_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_8_Branch_8_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_8_Branch_8_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_8_Branch_8_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_8_Branch_8_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_8_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_8_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_8_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_9_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_9_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_9_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_9_Branch_9_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_9_Branch_9_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_9_Branch_9_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_9_Branch_9_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_9_Branch_9_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_9_Branch_9_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_9_Branch_9_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_9_Branch_9_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_9_Branch_9_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_9_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_9_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_9_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_10_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_10_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_10_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_10_Branch_10_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_10_Branch_10_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_10_Branch_10_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', 
use_bias=False, name= 'Block17_10_Branch_10_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_10_Branch_10_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_10_Branch_10_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_10_Branch_10_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_10_Branch_10_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_10_Branch_10_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_10_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_10_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_10_Activation')(x) + + # Mixed 7a (Reduction-B block): 8 x 8 x 2080 + branch_0 = Conv2D(256, 1, strides=1, padding='same', use_bias=False, name= 'Mixed_7a_Branch_0_Conv2d_0a_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_0_Conv2d_0a_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Mixed_7a_Branch_0_Conv2d_0a_1x1_Activation')(branch_0) + branch_0 = Conv2D(384, 3, strides=2, padding='valid', use_bias=False, name= 'Mixed_7a_Branch_0_Conv2d_1a_3x3') (branch_0) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_0_Conv2d_1a_3x3_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Mixed_7a_Branch_0_Conv2d_1a_3x3_Activation')(branch_0) + branch_1 = Conv2D(256, 1, strides=1, padding='same', use_bias=False, name= 'Mixed_7a_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Mixed_7a_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(256, 3, strides=2, padding='valid', use_bias=False, name= 'Mixed_7a_Branch_1_Conv2d_1a_3x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_1_Conv2d_1a_3x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Mixed_7a_Branch_1_Conv2d_1a_3x3_Activation')(branch_1) + branch_2 = Conv2D(256, 1, strides=1, padding='same', use_bias=False, name= 'Mixed_7a_Branch_2_Conv2d_0a_1x1') (x) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Mixed_7a_Branch_2_Conv2d_0a_1x1_Activation')(branch_2) + branch_2 = Conv2D(256, 3, strides=1, padding='same', use_bias=False, name= 'Mixed_7a_Branch_2_Conv2d_0b_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Mixed_7a_Branch_2_Conv2d_0b_3x3_Activation')(branch_2) + branch_2 = Conv2D(256, 3, strides=2, padding='valid', use_bias=False, name= 'Mixed_7a_Branch_2_Conv2d_1a_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_2_Conv2d_1a_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', 
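Note for readers of this hunk: Block17_1 through Block17_10 are ten copies of the same Inception-ResNet-B unit; only the layer names change. Each unit runs a 1x1 branch alongside a 1x1 -> 1x7 -> 7x1 branch, concatenates them, projects back to 896 channels, scales the residual by 0.1 and adds it to the block input. The factory keeps the units unrolled so every layer name matches the serialized weights exactly, but the shape of the pattern is easier to see as a helper. A hypothetical sketch, not part of the patch; it assumes `scaling` is the element-wise multiply used by the Lambda layers (scaling(x, scale) == x * scale) and that the Branch_<n> naming quirk visible in blocks 6-10 holds for every block:

    from tensorflow.keras import backend as K
    from tensorflow.keras.layers import (Activation, BatchNormalization,
                                         Concatenate, Conv2D, Lambda, add)

    def scaling(x, scale):
        return x * scale  # assumption: matches the Lambda used in the factory

    def inception_resnet_b(x, idx, scale=0.1):
        # One Block17 unit; names follow the pattern required by the weights file
        n = f'Block17_{idx}'
        branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False,
                          name=f'{n}_Branch_0_Conv2d_1x1')(x)
        branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False,
                                      name=f'{n}_Branch_0_Conv2d_1x1_BatchNorm')(branch_0)
        branch_0 = Activation('relu', name=f'{n}_Branch_0_Conv2d_1x1_Activation')(branch_0)
        branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False,
                          name=f'{n}_Branch_{idx}_Conv2d_0a_1x1')(x)
        branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False,
                                      name=f'{n}_Branch_{idx}_Conv2d_0a_1x1_BatchNorm')(branch_1)
        branch_1 = Activation('relu', name=f'{n}_Branch_{idx}_Conv2d_0a_1x1_Activation')(branch_1)
        for kernel, tag in (([1, 7], '0b_1x7'), ([7, 1], '0c_7x1')):
            branch_1 = Conv2D(128, kernel, strides=1, padding='same', use_bias=False,
                              name=f'{n}_Branch_{idx}_Conv2d_{tag}')(branch_1)
            branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False,
                                          name=f'{n}_Branch_{idx}_Conv2d_{tag}_BatchNorm')(branch_1)
            branch_1 = Activation('relu', name=f'{n}_Branch_{idx}_Conv2d_{tag}_Activation')(branch_1)
        mixed = Concatenate(axis=3, name=f'{n}_Concatenate')([branch_0, branch_1])
        up = Conv2D(896, 1, strides=1, padding='same', use_bias=True,
                    name=f'{n}_Conv2d_1x1')(mixed)
        up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': scale})(up)
        x = add([x, up])
        return Activation('relu', name=f'{n}_Activation')(x)

The same residual-scaling idea repeats below in the Block8 (Inception-ResNet-C) units with 1x3/3x1 convolutions, 1792 channels and scale 0.2.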
name='Mixed_7a_Branch_2_Conv2d_1a_3x3_Activation')(branch_2) + branch_pool = MaxPooling2D(3, strides=2, padding='valid', name='Mixed_7a_Branch_3_MaxPool_1a_3x3')(x) + branches = [branch_0, branch_1, branch_2, branch_pool] + x = Concatenate(axis=3, name='Mixed_7a')(branches) + + # 5x Block8 (Inception-ResNet-C block): + + branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_1_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_1_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block8_1_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_1_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_1_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_1_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(192, [1, 3], strides=1, padding='same', use_bias=False, name= 'Block8_1_Branch_1_Conv2d_0b_1x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_1_Branch_1_Conv2d_0b_1x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_1_Branch_1_Conv2d_0b_1x3_Activation')(branch_1) + branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_1_Branch_1_Conv2d_0c_3x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_1_Branch_1_Conv2d_0c_3x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_1_Branch_1_Conv2d_0c_3x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block8_1_Concatenate')(branches) + up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_1_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.2})(up) + x = add([x, up]) + x = Activation('relu', name='Block8_1_Activation')(x) + + branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_2_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_2_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block8_2_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_2_Branch_2_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_2_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_2_Branch_2_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(192, [1, 3], strides=1, padding='same', use_bias=False, name= 'Block8_2_Branch_2_Conv2d_0b_1x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_2_Branch_2_Conv2d_0b_1x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_2_Branch_2_Conv2d_0b_1x3_Activation')(branch_1) + branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_2_Branch_2_Conv2d_0c_3x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_2_Branch_2_Conv2d_0c_3x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_2_Branch_2_Conv2d_0c_3x1_Activation')(branch_1) + branches 
= [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block8_2_Concatenate')(branches) + up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_2_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.2})(up) + x = add([x, up]) + x = Activation('relu', name='Block8_2_Activation')(x) + + branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_3_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_3_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block8_3_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_3_Branch_3_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_3_Branch_3_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_3_Branch_3_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(192, [1, 3], strides=1, padding='same', use_bias=False, name= 'Block8_3_Branch_3_Conv2d_0b_1x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_3_Branch_3_Conv2d_0b_1x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_3_Branch_3_Conv2d_0b_1x3_Activation')(branch_1) + branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_3_Branch_3_Conv2d_0c_3x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_3_Branch_3_Conv2d_0c_3x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_3_Branch_3_Conv2d_0c_3x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block8_3_Concatenate')(branches) + up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_3_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.2})(up) + x = add([x, up]) + x = Activation('relu', name='Block8_3_Activation')(x) + + branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_4_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_4_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block8_4_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_4_Branch_4_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_4_Branch_4_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_4_Branch_4_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(192, [1, 3], strides=1, padding='same', use_bias=False, name= 'Block8_4_Branch_4_Conv2d_0b_1x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_4_Branch_4_Conv2d_0b_1x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_4_Branch_4_Conv2d_0b_1x3_Activation')(branch_1) + branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_4_Branch_4_Conv2d_0c_3x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_4_Branch_4_Conv2d_0c_3x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', 
name='Block8_4_Branch_4_Conv2d_0c_3x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block8_4_Concatenate')(branches) + up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_4_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.2})(up) + x = add([x, up]) + x = Activation('relu', name='Block8_4_Activation')(x) + + branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_5_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_5_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block8_5_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_5_Branch_5_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_5_Branch_5_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_5_Branch_5_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(192, [1, 3], strides=1, padding='same', use_bias=False, name= 'Block8_5_Branch_5_Conv2d_0b_1x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_5_Branch_5_Conv2d_0b_1x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_5_Branch_5_Conv2d_0b_1x3_Activation')(branch_1) + branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_5_Branch_5_Conv2d_0c_3x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_5_Branch_5_Conv2d_0c_3x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_5_Branch_5_Conv2d_0c_3x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block8_5_Concatenate')(branches) + up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_5_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.2})(up) + x = add([x, up]) + x = Activation('relu', name='Block8_5_Activation')(x) + + branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_6_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_6_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block8_6_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_6_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_6_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_6_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(192, [1, 3], strides=1, padding='same', use_bias=False, name= 'Block8_6_Branch_1_Conv2d_0b_1x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_6_Branch_1_Conv2d_0b_1x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_6_Branch_1_Conv2d_0b_1x3_Activation')(branch_1) + branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_6_Branch_1_Conv2d_0c_3x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, 
name='Block8_6_Branch_1_Conv2d_0c_3x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_6_Branch_1_Conv2d_0c_3x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block8_6_Concatenate')(branches) + up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_6_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 1})(up) + x = add([x, up]) + + # Classification block + x = GlobalAveragePooling2D(name='AvgPool')(x) + x = Dropout(1.0 - 0.8, name='Dropout')(x) + # Bottleneck + x = Dense(128, use_bias=False, name='Bottleneck')(x) + x = BatchNormalization(momentum=0.995, epsilon=0.001, scale=False, name='Bottleneck_BatchNorm')(x) + + # Create model + model = Model(inputs, x, name='inception_resnet_v1') + + return model + + +def loadModel(): + model = InceptionResNetV2() + weights = '/data/models/face/facenet_weights.h5' + + if os.path.isfile(weights) != True: + raise FileNotFoundError('Facenet weights does not exist') + + model.load_weights(weights) + + return model diff --git a/photonix/classifiers/face/deepface/basemodels/__init__.py b/photonix/classifiers/face/deepface/basemodels/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/photonix/classifiers/face/deepface/commons/__init__.py b/photonix/classifiers/face/deepface/commons/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/photonix/classifiers/face/deepface/commons/distance.py b/photonix/classifiers/face/deepface/commons/distance.py new file mode 100644 index 00000000..0e4dd786 --- /dev/null +++ b/photonix/classifiers/face/deepface/commons/distance.py @@ -0,0 +1,43 @@ +import numpy as np + + +def findCosineDistance(source_representation, test_representation): + a = np.matmul(np.transpose(source_representation), test_representation) + b = np.sum(np.multiply(source_representation, source_representation)) + c = np.sum(np.multiply(test_representation, test_representation)) + return 1 - (a / (np.sqrt(b) * np.sqrt(c))) + + +def findEuclideanDistance(source_representation, test_representation): + if type(source_representation) == list: + source_representation = np.array(source_representation) + + if type(test_representation) == list: + test_representation = np.array(test_representation) + + euclidean_distance = source_representation - test_representation + euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance)) + euclidean_distance = np.sqrt(euclidean_distance) + return euclidean_distance + + +def l2_normalize(x): + return x / np.sqrt(np.sum(np.multiply(x, x))) + + +def findThreshold(model_name, distance_metric): + base_threshold = {'cosine': 0.40, 'euclidean': 0.55, 'euclidean_l2': 0.75} + + thresholds = { + 'VGG-Face': {'cosine': 0.40, 'euclidean': 0.55, 'euclidean_l2': 0.75}, + 'OpenFace': {'cosine': 0.10, 'euclidean': 0.55, 'euclidean_l2': 0.55}, + 'Facenet': {'cosine': 0.40, 'euclidean': 10, 'euclidean_l2': 0.80}, + 'DeepFace': {'cosine': 0.23, 'euclidean': 64, 'euclidean_l2': 0.64}, + 'DeepID': {'cosine': 0.015, 'euclidean': 45, 'euclidean_l2': 0.17}, + 'Dlib': {'cosine': 0.07, 'euclidean': 0.6, 'euclidean_l2': 0.6}, + 'ArcFace': {'cosine': 0.6871912959056619, 'euclidean': 4.1591468986978075, 'euclidean_l2': 1.1315718048269017} + } + + threshold = thresholds.get(model_name, base_threshold).get(distance_metric, 0.4) + + return threshold diff --git a/photonix/classifiers/face/deepface/commons/functions.py 
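The distance helpers in commons/distance.py above are the entire matching toolkit that face/model.py relies on later in the series: findCosineDistance and findEuclideanDistance compare two embeddings, l2_normalize rescales a vector to unit length so the euclidean_l2 thresholds apply, and findThreshold looks up a per-model cut-off, falling back to 0.4 for unknown combinations. A quick usage sketch with made-up 4-dimensional vectors (real FaceNet embeddings are 128-dimensional):

    import numpy as np
    from photonix.classifiers.face.deepface.commons import distance

    a = np.array([0.1, 0.3, 0.5, 0.2])
    b = np.array([0.1, 0.2, 0.6, 0.2])

    cos = distance.findCosineDistance(a, b)      # 0.0 means identical direction
    euc = distance.findEuclideanDistance(a, b)
    euc_l2 = distance.findEuclideanDistance(distance.l2_normalize(a),
                                            distance.l2_normalize(b))

    threshold = distance.findThreshold('Facenet', 'euclidean')  # 10, per the table
    same_person = euc < threshold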
b/photonix/classifiers/face/deepface/commons/functions.py new file mode 100644 index 00000000..b93c7e6b --- /dev/null +++ b/photonix/classifiers/face/deepface/commons/functions.py @@ -0,0 +1,243 @@ +import os +import numpy as np +import cv2 +from pathlib import Path +import math +from PIL import Image +import base64 +from photonix.classifiers.face.deepface.commons import distance +from photonix.classifiers.face.mtcnn import MTCNN #0.1.0 + +import tensorflow as tf +tf_version = int(tf.__version__.split(".")[0]) + +if tf_version == 1: + import keras + from keras.preprocessing.image import load_img, save_img, img_to_array + from keras.applications.imagenet_utils import preprocess_input + from keras.preprocessing import image +elif tf_version == 2: + from tensorflow import keras + from tensorflow.keras.preprocessing.image import load_img, save_img, img_to_array + from tensorflow.keras.applications.imagenet_utils import preprocess_input + from tensorflow.keras.preprocessing import image + +#-------------------------------------------------- + +def initialize_input(img1_path, img2_path = None): + + if type(img1_path) == list: + bulkProcess = True + img_list = img1_path.copy() + else: + bulkProcess = False + + if ( + (type(img2_path) == str and img2_path != None) #exact image path, base64 image + or (isinstance(img2_path, np.ndarray) and img2_path.any()) #numpy array + ): + img_list = [[img1_path, img2_path]] + else: #analyze function passes just img1_path + img_list = [img1_path] + + return img_list, bulkProcess + + +def initialize_detector(detector_backend): + + global face_detector + + if detector_backend == 'mtcnn': + face_detector = MTCNN() + + else: + raise ValueError('mtcnn is the only detector backend available') + + +def loadBase64Img(uri): + encoded_data = uri.split(',')[1] + nparr = np.fromstring(base64.b64decode(encoded_data), np.uint8) + img = cv2.imdecode(nparr, cv2.IMREAD_COLOR) + return img + +def get_opencv_path(): + opencv_home = cv2.__file__ + folders = opencv_home.split(os.path.sep)[0:-1] + + path = folders[0] + for folder in folders[1:]: + path = path + "/" + folder + + return path+"/data/" + +def load_image(img): + + exact_image = False + if type(img).__module__ == np.__name__: + exact_image = True + + base64_img = False + if len(img) > 11 and img[0:11] == "data:image/": + base64_img = True + + #--------------------------- + + if base64_img == True: + img = loadBase64Img(img) + + elif exact_image != True: #image path passed as input + if os.path.isfile(img) != True: + raise ValueError("Confirm that ",img," exists") + + img = cv2.imread(img) + + return img + +def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_detection = True): + + img_region = [0, 0, img.shape[0], img.shape[1]] + + #if functions.preproces_face is called directly, then face_detector global variable might not been initialized. + if not "face_detector" in globals(): + initialize_detector(detector_backend = detector_backend) + + if detector_backend == 'mtcnn': + + img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #mtcnn expects RGB but OpenCV read BGR + detections = face_detector.detect_faces(img_rgb) + + if len(detections) > 0: + detection = detections[0] + x, y, w, h = detection["box"] + detected_face = img[int(y):int(y+h), int(x):int(x+w)] + return detected_face, [x, y, w, h] + + else: #if no face detected + if not enforce_detection: + return img, img_region + + else: + raise ValueError("Face could not be detected. 
Please confirm that the picture is a face photo or consider to set enforce_detection param to False.") + + else: + detectors = ['mtcnn'] + raise ValueError("Valid backends are ", detectors," but you passed ", detector_backend) + +def alignment_procedure(img, left_eye, right_eye): + + #this function aligns given face in img based on left and right eye coordinates + + left_eye_x, left_eye_y = left_eye + right_eye_x, right_eye_y = right_eye + + #----------------------- + #find rotation direction + + if left_eye_y > right_eye_y: + point_3rd = (right_eye_x, left_eye_y) + direction = -1 #rotate same direction to clock + else: + point_3rd = (left_eye_x, right_eye_y) + direction = 1 #rotate inverse direction of clock + + #----------------------- + #find length of triangle edges + + a = distance.findEuclideanDistance(np.array(left_eye), np.array(point_3rd)) + b = distance.findEuclideanDistance(np.array(right_eye), np.array(point_3rd)) + c = distance.findEuclideanDistance(np.array(right_eye), np.array(left_eye)) + + #----------------------- + + #apply cosine rule + + if b != 0 and c != 0: #this multiplication causes division by zero in cos_a calculation + + cos_a = (b*b + c*c - a*a)/(2*b*c) + angle = np.arccos(cos_a) #angle in radian + angle = (angle * 180) / math.pi #radian to degree + + #----------------------- + #rotate base image + + if direction == -1: + angle = 90 - angle + + img = Image.fromarray(img) + img = np.array(img.rotate(direction * angle)) + + #----------------------- + + return img #return img anyway + +def align_face(img, detector_backend = 'mtcnn'): + + if detector_backend == 'mtcnn': + + img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #mtcnn expects RGB but OpenCV read BGR + detections = face_detector.detect_faces(img_rgb) + + if len(detections) > 0: + detection = detections[0] + + keypoints = detection["keypoints"] + left_eye = keypoints["left_eye"] + right_eye = keypoints["right_eye"] + + img = alignment_procedure(img, left_eye, right_eye) + + return img #return img anyway + +def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_detection = True, detector_backend = 'opencv', return_region = False): + + #img_path = copy.copy(img) + + #img might be path, base64 or numpy array. Convert it to numpy whatever it is. + img = load_image(img) + base_img = img.copy() + + img, region = detect_face(img = img, detector_backend = detector_backend, grayscale = grayscale, enforce_detection = enforce_detection) + + #-------------------------- + + if img.shape[0] > 0 and img.shape[1] > 0: + img = align_face(img = img, detector_backend = detector_backend) + else: + + if enforce_detection == True: + raise ValueError("Detected face shape is ", img.shape,". Consider to set enforce_detection argument to False.") + else: #restore base image + img = base_img.copy() + + #-------------------------- + + #post-processing + if grayscale == True: + img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + + img = cv2.resize(img, target_size) + img_pixels = image.img_to_array(img) + img_pixels = np.expand_dims(img_pixels, axis = 0) + img_pixels /= 255 #normalize input in [0, 1] + + if return_region == True: + return img_pixels, region + else: + return img_pixels + +def find_input_shape(model): + + #face recognition models have different size of inputs + #my environment returns (None, 224, 224, 3) but some people mentioned that they got [(None, 224, 224, 3)]. I think this is because of version issue. 
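The geometry in alignment_procedure above is worth unpacking: it builds a right triangle from the two eyes plus a third point sharing one eye's y coordinate, recovers the angle between the eye line and the horizontal with the cosine rule, cos(A) = (b^2 + c^2 - a^2) / (2bc), and rotates the image by that angle, with the sign chosen by `direction` according to which eye sits higher. Worked numbers for hypothetical keypoints (pixel coordinates, y grows downwards):

    import numpy as np

    left_eye = (38, 52)    # lower on screen, so left_eye_y > right_eye_y
    right_eye = (70, 44)   # hence direction = -1 and point_3rd = (70, 52)

    a = 32.0                       # left eye  -> third point
    b = 8.0                        # right eye -> third point
    c = (32**2 + 8**2) ** 0.5      # eye-to-eye distance, ~32.98

    cos_a = (b*b + c*c - a*a) / (2*b*c)    # ~0.2425
    angle = np.degrees(np.arccos(cos_a))   # ~75.96 degrees
    angle = 90 - angle                     # direction == -1 branch: ~14.04
    # img.rotate(direction * angle) then levels the eye line,
    # matching the tilt atan(8/32) ~= 14.04 degrees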
+ + input_shape = model.layers[0].input_shape + + if type(input_shape) == list: + input_shape = input_shape[0][1:3] + else: + input_shape = input_shape[1:3] + + if type(input_shape) == list: #issue 197: some people got array here instead of tuple + input_shape = tuple(input_shape) + + return input_shape diff --git a/photonix/classifiers/face/deepface/models/__init__.py b/photonix/classifiers/face/deepface/models/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/photonix/classifiers/face_detection/model.py b/photonix/classifiers/face/model.py similarity index 87% rename from photonix/classifiers/face_detection/model.py rename to photonix/classifiers/face/model.py index f3af1c1f..f457687c 100644 --- a/photonix/classifiers/face_detection/model.py +++ b/photonix/classifiers/face/model.py @@ -1,26 +1,26 @@ import json -import operator import os import sys from pathlib import Path from random import randint -from deepface import DeepFace import numpy as np from PIL import Image import redis from redis_lock import Lock from photonix.classifiers.base_model import BaseModel -from photonix.classifiers.face_detection.mtcnn import MTCNN -from photonix.photos.models import Tag, PhotoTag +from photonix.classifiers.face.deepface import DeepFace +from photonix.classifiers.face.mtcnn import MTCNN +from photonix.classifiers.face.deepface.commons.distance import findEuclideanDistance -GRAPH_FILE = os.path.join('face_detection', 'mtcnn_weights.npy') +GRAPH_FILE = os.path.join('face', 'mtcnn_weights.npy') + class FaceDetectionModel(BaseModel): - name = 'face_detection' - version = 20210120 + name = 'face' + version = 20210528 approx_ram_mb = 1000 max_num_workers = 1 @@ -50,15 +50,9 @@ def predict(self, image_file, min_score=0.99): return list(filter(lambda f: f['confidence'] > min_score, results)) -def calculate_euclidian_distance(source_representation, test_representation): - euclidean_distance = np.array(source_representation) - np.array(test_representation) - euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance)) - euclidean_distance = np.sqrt(euclidean_distance) - return euclidean_distance - - def find_closest_face_tag(library_id, source_embedding): # Collect all previously generated embeddings + from photonix.photos.models import PhotoTag representations = [] for photo_tag in PhotoTag.objects.filter(photo__library_id=library_id, tag__type='F'): try: @@ -70,7 +64,7 @@ def find_closest_face_tag(library_id, source_embedding): # Calculate Euclidean distances distances = [] for (_, target_embedding) in representations: - distance = calculate_euclidian_distance(source_embedding, target_embedding) + distance = findEuclideanDistance(source_embedding, target_embedding) distances.append(distance) # Return closest match and distance value @@ -120,7 +114,7 @@ def run_on_photo(photo_id): if photo: from django.utils import timezone - from photonix.photos.models import PhotoTag + from photonix.photos.models import Tag, PhotoTag photo.clear_tags(source='C', type='F') for result in results: if result.get('closest_distance', 999) < 14: diff --git a/photonix/classifiers/face_detection/mtcnn/__init__.py b/photonix/classifiers/face/mtcnn/__init__.py similarity index 100% rename from photonix/classifiers/face_detection/mtcnn/__init__.py rename to photonix/classifiers/face/mtcnn/__init__.py diff --git a/photonix/classifiers/face_detection/mtcnn/exceptions/__init__.py b/photonix/classifiers/face/mtcnn/exceptions/__init__.py similarity index 100% rename from 
photonix/classifiers/face_detection/mtcnn/exceptions/__init__.py rename to photonix/classifiers/face/mtcnn/exceptions/__init__.py diff --git a/photonix/classifiers/face_detection/mtcnn/exceptions/invalid_image.py b/photonix/classifiers/face/mtcnn/exceptions/invalid_image.py similarity index 100% rename from photonix/classifiers/face_detection/mtcnn/exceptions/invalid_image.py rename to photonix/classifiers/face/mtcnn/exceptions/invalid_image.py diff --git a/photonix/classifiers/face_detection/mtcnn/layer_factory.py b/photonix/classifiers/face/mtcnn/layer_factory.py similarity index 100% rename from photonix/classifiers/face_detection/mtcnn/layer_factory.py rename to photonix/classifiers/face/mtcnn/layer_factory.py diff --git a/photonix/classifiers/face_detection/mtcnn/mtcnn.py b/photonix/classifiers/face/mtcnn/mtcnn.py similarity index 99% rename from photonix/classifiers/face_detection/mtcnn/mtcnn.py rename to photonix/classifiers/face/mtcnn/mtcnn.py index b18a2e85..731d5cab 100644 --- a/photonix/classifiers/face_detection/mtcnn/mtcnn.py +++ b/photonix/classifiers/face/mtcnn/mtcnn.py @@ -79,7 +79,7 @@ def __init__(self, weights_file: str = None, min_face_size: int = 20, steps_thre steps_threshold = [0.6, 0.7, 0.7] if weights_file is None: - weights_file = '/data/models/face_detection/mtcnn_weights.npy' + weights_file = '/data/models/face/mtcnn_weights.npy' self._min_face_size = min_face_size self._steps_threshold = steps_threshold diff --git a/photonix/classifiers/face_detection/mtcnn/network.py b/photonix/classifiers/face/mtcnn/network.py similarity index 100% rename from photonix/classifiers/face_detection/mtcnn/network.py rename to photonix/classifiers/face/mtcnn/network.py diff --git a/photonix/classifiers/face_detection/mtcnn/network/__init__.py b/photonix/classifiers/face/mtcnn/network/__init__.py similarity index 100% rename from photonix/classifiers/face_detection/mtcnn/network/__init__.py rename to photonix/classifiers/face/mtcnn/network/__init__.py diff --git a/photonix/classifiers/face_detection/mtcnn/network/factory.py b/photonix/classifiers/face/mtcnn/network/factory.py similarity index 100% rename from photonix/classifiers/face_detection/mtcnn/network/factory.py rename to photonix/classifiers/face/mtcnn/network/factory.py diff --git a/photonix/photos/management/commands/classification_face_detection_processor.py b/photonix/photos/management/commands/classification_face_detection_processor.py index 4cebd0f9..28dcc397 100644 --- a/photonix/photos/management/commands/classification_face_detection_processor.py +++ b/photonix/photos/management/commands/classification_face_detection_processor.py @@ -1,6 +1,6 @@ from django.core.management.base import BaseCommand # Pre-load the model graphs so it doesn't have to be done for each job -from photonix.classifiers.face_detection import FaceDetectionModel, run_on_photo +from photonix.classifiers.face import FaceDetectionModel, run_on_photo from photonix.photos.models import Task from photonix.photos.utils.classification import ThreadedQueueProcessor @@ -15,7 +15,7 @@ class Command(BaseCommand): def run_processors(self): num_workers = 1 batch_size = 64 - threaded_queue_processor = ThreadedQueueProcessor(model, 'classify.face_detection', run_on_photo, num_workers, batch_size) + threaded_queue_processor = ThreadedQueueProcessor(model, 'classify.face', run_on_photo, num_workers, batch_size) threaded_queue_processor.run() def handle(self, *args, **options): diff --git a/photonix/photos/utils/classification.py 
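Taken together, the face/model.py changes above turn plain detection into recognition: each detected box is embedded with FaceNet, the embedding is compared against every stored face embedding in the library, and the photo is either tagged with the closest existing person (distance below the hard-coded 14) or given a fresh unknown-face tag whose embedding is kept in PhotoTag.extra_data for future matches. A condensed sketch of the per-face decision (the wiring that attaches closest_tag/closest_distance to each result happens in predict(), outside these hunks):

    # Per-face decision as implemented in run_on_photo() above
    closest_tag, closest_distance = find_closest_face_tag(photo.library_id,
                                                          result['embedding'])
    if closest_distance < 14:
        tag = Tag.objects.get(id=closest_tag, library=photo.library, type='F')
    else:
        tag = get_or_create_tag(library=photo.library,
                                name=f'Unknown face {randint(1, 1000000)}',
                                type='F', source='C')
    extra_data = json.dumps({'facenet_embedding': result['embedding']})
    # ...box coordinates are normalised and stored on the PhotoTag as before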
b/photonix/photos/utils/classification.py index 7946cde4..c1713b0a 100644 --- a/photonix/photos/utils/classification.py +++ b/photonix/photos/utils/classification.py @@ -13,7 +13,7 @@ 'location', 'object', 'style', - 'face_detection', + 'face', ] diff --git a/requirements.txt b/requirements.txt index f1001c7b..be6de12c 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ numpy==1.19.2 scipy==1.4.1 matplotlib==3.1.2 tensorflow==2.4.1 -deepface==0.0.51 +opencv-python>=3.4.4 Django==3.0.14 django-cors-headers==3.2.1 From 4da4d1e99083da30579005ca8bfd582ea11c05ac Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Sun, 30 May 2021 22:26:47 +0100 Subject: [PATCH 033/110] Filtering by people and improvements to detection bounding box display --- .../face/deepface/commons/functions.py | 100 ++++++++---------- photonix/classifiers/face/model.py | 4 +- .../migrations/0009_auto_20210529_1244.py | 23 ++++ photonix/photos/models.py | 3 +- photonix/photos/schema.py | 4 +- requirements.txt | 5 +- ui/src/components/BoundingBoxes.js | 11 +- ui/src/components/SearchInput.js | 8 +- ui/src/components/ZoomableImage.js | 1 + ui/src/containers/FiltersContainer.js | 8 +- ui/src/static/images/person.svg | 1 + 11 files changed, 97 insertions(+), 71 deletions(-) create mode 100644 photonix/photos/migrations/0009_auto_20210529_1244.py create mode 100644 ui/src/static/images/person.svg diff --git a/photonix/classifiers/face/deepface/commons/functions.py b/photonix/classifiers/face/deepface/commons/functions.py index b93c7e6b..71c56d21 100644 --- a/photonix/classifiers/face/deepface/commons/functions.py +++ b/photonix/classifiers/face/deepface/commons/functions.py @@ -1,28 +1,17 @@ +import base64 +import math import os -import numpy as np + import cv2 -from pathlib import Path -import math +import numpy as np from PIL import Image -import base64 -from photonix.classifiers.face.deepface.commons import distance -from photonix.classifiers.face.mtcnn import MTCNN #0.1.0 - -import tensorflow as tf -tf_version = int(tf.__version__.split(".")[0]) +from tensorflow.keras.preprocessing.image import load_img, save_img, img_to_array +from tensorflow.keras.applications.imagenet_utils import preprocess_input +from tensorflow.keras.preprocessing import image -if tf_version == 1: - import keras - from keras.preprocessing.image import load_img, save_img, img_to_array - from keras.applications.imagenet_utils import preprocess_input - from keras.preprocessing import image -elif tf_version == 2: - from tensorflow import keras - from tensorflow.keras.preprocessing.image import load_img, save_img, img_to_array - from tensorflow.keras.applications.imagenet_utils import preprocess_input - from tensorflow.keras.preprocessing import image +from photonix.classifiers.face.deepface.commons import distance +from photonix.classifiers.face.mtcnn import MTCNN # 0.1.0 -#-------------------------------------------------- def initialize_input(img1_path, img2_path = None): @@ -33,18 +22,17 @@ def initialize_input(img1_path, img2_path = None): bulkProcess = False if ( - (type(img2_path) == str and img2_path != None) #exact image path, base64 image - or (isinstance(img2_path, np.ndarray) and img2_path.any()) #numpy array + (type(img2_path) == str and img2_path != None) # exact image path, base64 image + or (isinstance(img2_path, np.ndarray) and img2_path.any()) # numpy array ): img_list = [[img1_path, img2_path]] - else: #analyze function passes just img1_path + else: # analyze function passes just img1_path img_list = [img1_path] return 
img_list, bulkProcess def initialize_detector(detector_backend): - global face_detector if detector_backend == 'mtcnn': @@ -60,6 +48,7 @@ def loadBase64Img(uri): img = cv2.imdecode(nparr, cv2.IMREAD_COLOR) return img + def get_opencv_path(): opencv_home = cv2.__file__ folders = opencv_home.split(os.path.sep)[0:-1] @@ -70,8 +59,8 @@ def get_opencv_path(): return path+"/data/" -def load_image(img): +def load_image(img): exact_image = False if type(img).__module__ == np.__name__: exact_image = True @@ -80,30 +69,28 @@ def load_image(img): if len(img) > 11 and img[0:11] == "data:image/": base64_img = True - #--------------------------- - if base64_img == True: img = loadBase64Img(img) - elif exact_image != True: #image path passed as input + elif exact_image != True: # image path passed as input if os.path.isfile(img) != True: - raise ValueError("Confirm that ",img," exists") + raise ValueError("Confirm that ", img, " exists") img = cv2.imread(img) return img -def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_detection = True): +def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_detection = True): img_region = [0, 0, img.shape[0], img.shape[1]] - #if functions.preproces_face is called directly, then face_detector global variable might not been initialized. + # if functions.preproces_face is called directly, then face_detector global variable might not been initialized. if not "face_detector" in globals(): initialize_detector(detector_backend = detector_backend) if detector_backend == 'mtcnn': - img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #mtcnn expects RGB but OpenCV read BGR + img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # mtcnn expects RGB but OpenCV read BGR detections = face_detector.detect_faces(img_rgb) if len(detections) > 0: @@ -112,7 +99,7 @@ def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_det detected_face = img[int(y):int(y+h), int(x):int(x+w)] return detected_face, [x, y, w, h] - else: #if no face detected + else: # if no face detected if not enforce_detection: return img, img_region @@ -123,25 +110,25 @@ def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_det detectors = ['mtcnn'] raise ValueError("Valid backends are ", detectors," but you passed ", detector_backend) -def alignment_procedure(img, left_eye, right_eye): - #this function aligns given face in img based on left and right eye coordinates +def alignment_procedure(img, left_eye, right_eye): + # this function aligns given face in img based on left and right eye coordinates left_eye_x, left_eye_y = left_eye right_eye_x, right_eye_y = right_eye #----------------------- - #find rotation direction + # find rotation direction if left_eye_y > right_eye_y: point_3rd = (right_eye_x, left_eye_y) - direction = -1 #rotate same direction to clock + direction = -1 # rotate same direction to clock else: point_3rd = (left_eye_x, right_eye_y) - direction = 1 #rotate inverse direction of clock + direction = 1 # rotate inverse direction of clock #----------------------- - #find length of triangle edges + # find length of triangle edges a = distance.findEuclideanDistance(np.array(left_eye), np.array(point_3rd)) b = distance.findEuclideanDistance(np.array(right_eye), np.array(point_3rd)) @@ -149,16 +136,16 @@ def alignment_procedure(img, left_eye, right_eye): #----------------------- - #apply cosine rule + # apply cosine rule - if b != 0 and c != 0: #this multiplication causes division by zero in cos_a calculation + if b != 0 and c 
!= 0: # this multiplication causes division by zero in cos_a calculation cos_a = (b*b + c*c - a*a)/(2*b*c) - angle = np.arccos(cos_a) #angle in radian - angle = (angle * 180) / math.pi #radian to degree + angle = np.arccos(cos_a) # angle in radian + angle = (angle * 180) / math.pi # radian to degree #----------------------- - #rotate base image + # rotate base image if direction == -1: angle = 90 - angle @@ -168,13 +155,13 @@ def alignment_procedure(img, left_eye, right_eye): #----------------------- - return img #return img anyway + return img # return img anyway -def align_face(img, detector_backend = 'mtcnn'): +def align_face(img, detector_backend = 'mtcnn'): if detector_backend == 'mtcnn': - img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) #mtcnn expects RGB but OpenCV read BGR + img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # mtcnn expects RGB but OpenCV read BGR detections = face_detector.detect_faces(img_rgb) if len(detections) > 0: @@ -186,13 +173,10 @@ def align_face(img, detector_backend = 'mtcnn'): img = alignment_procedure(img, left_eye, right_eye) - return img #return img anyway - -def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_detection = True, detector_backend = 'opencv', return_region = False): + return img # return img anyway - #img_path = copy.copy(img) - #img might be path, base64 or numpy array. Convert it to numpy whatever it is. +def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_detection = True, detector_backend = 'opencv', return_region = False): img = load_image(img) base_img = img.copy() @@ -206,12 +190,12 @@ def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_dete if enforce_detection == True: raise ValueError("Detected face shape is ", img.shape,". Consider to set enforce_detection argument to False.") - else: #restore base image + else: # restore base image img = base_img.copy() #-------------------------- - #post-processing + # post-processing if grayscale == True: img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) @@ -225,10 +209,10 @@ def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_dete else: return img_pixels -def find_input_shape(model): - #face recognition models have different size of inputs - #my environment returns (None, 224, 224, 3) but some people mentioned that they got [(None, 224, 224, 3)]. I think this is because of version issue. +def find_input_shape(model): + # face recognition models have different size of inputs + # my environment returns (None, 224, 224, 3) but some people mentioned that they got [(None, 224, 224, 3)]. I think this is because of version issue. 
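preprocess_face above is the bridge to the recogniser: whatever form img takes (file path, base64 data URI or numpy array), it is loaded, face-detected, eye-aligned, resized and scaled into [0, 1], ready for Model.predict. A hedged sketch of the intended call pattern; the (160, 160) input size is an assumption about deepface's FaceNet build, and enforce_detection=False falls back to the whole image when no face is found:

    input_shape = find_input_shape(model)          # e.g. (160, 160) for FaceNet
    face_pixels = preprocess_face(img='photo.jpg',
                                  target_size=input_shape,
                                  detector_backend='mtcnn',
                                  enforce_detection=False)
    embedding = model.predict(face_pixels)[0]      # 128-d vector for FaceNet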
input_shape = model.layers[0].input_shape @@ -237,7 +221,7 @@ def find_input_shape(model): else: input_shape = input_shape[1:3] - if type(input_shape) == list: #issue 197: some people got array here instead of tuple + if type(input_shape) == list: # issue 197: some people got array here instead of tuple input_shape = tuple(input_shape) return input_shape diff --git a/photonix/classifiers/face/model.py b/photonix/classifiers/face/model.py index f457687c..1cc9042f 100644 --- a/photonix/classifiers/face/model.py +++ b/photonix/classifiers/face/model.py @@ -21,7 +21,7 @@ class FaceDetectionModel(BaseModel): name = 'face' version = 20210528 - approx_ram_mb = 1000 + approx_ram_mb = 600 max_num_workers = 1 def __init__(self, model_dir=None, graph_file=GRAPH_FILE, lock_name=None): @@ -121,7 +121,7 @@ def run_on_photo(photo_id): tag = Tag.objects.get(id=result['closest_tag'], library=photo.library, type='F') print(f'MATCHED {tag.name}') else: - tag = get_or_create_tag(library=photo.library, name=f'Unknown face {randint(1,1000000)}', type='F', source='C') + tag = get_or_create_tag(library=photo.library, name=f'Unknown person {randint(0, 999999):06f}', type='F', source='C') x = (result['box'][0] + (result['box'][2] / 2)) / photo.base_file.width y = (result['box'][1] + (result['box'][3] / 2)) / photo.base_file.height width = result['box'][2] / photo.base_file.width diff --git a/photonix/photos/migrations/0009_auto_20210529_1244.py b/photonix/photos/migrations/0009_auto_20210529_1244.py new file mode 100644 index 00000000..7b71123e --- /dev/null +++ b/photonix/photos/migrations/0009_auto_20210529_1244.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.3 on 2021-05-29 12:44 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('photos', '0008_phototag_extra_data'), + ] + + operations = [ + migrations.AddField( + model_name='phototag', + name='retrained_model_version', + field=models.PositiveBigIntegerField(default=0, help_text='If classifier has models that are re-trained locally (e.g. Face) then we want to store this too (YYYYMMDDHHMMSS)'), + ), + migrations.AlterField( + model_name='phototag', + name='model_version', + field=models.PositiveIntegerField(default=0, help_text='Version number of classifier model if source is Computer (YYYYMMDD)'), + ), + ] diff --git a/photonix/photos/models.py b/photonix/photos/models.py index 6748fc8e..68b71501 100644 --- a/photonix/photos/models.py +++ b/photonix/photos/models.py @@ -251,7 +251,8 @@ class PhotoTag(UUIDModel, VersionedModel): photo = models.ForeignKey(Photo, related_name='photo_tags', on_delete=models.CASCADE, null=True) tag = models.ForeignKey(Tag, related_name='photo_tags', on_delete=models.CASCADE) source = models.CharField(max_length=1, choices=SOURCE_CHOICES) - model_version = models.PositiveIntegerField(default=0) + model_version = models.PositiveIntegerField(default=0, help_text='Version number of classifier model if source is Computer (YYYYMMDD)') + retrained_model_version = models.PositiveBigIntegerField(default=0, help_text='If classifier has models that are re-trained locally (e.g. 
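A note on the field types in migration 0009: model_version stays a PositiveIntegerField because a YYYYMMDD stamp such as 20210528 fits in PostgreSQL's 4-byte integer column, while retrained_model_version must be a PositiveBigIntegerField because a YYYYMMDDHHMMSS stamp does not:

    >>> 2**31 - 1                    # upper bound of a 32-bit signed integer column
    2147483647
    >>> 20210528 <= 2**31 - 1        # YYYYMMDD fits
    True
    >>> 20210529124400 <= 2**31 - 1  # YYYYMMDDHHMMSS needs the 64-bit column
    False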
Face) then we want to store this too (YYYYMMDDHHMMSS)') confidence = models.FloatField() significance = models.FloatField(null=True) verified = models.BooleanField(default=False) diff --git a/photonix/photos/schema.py b/photonix/photos/schema.py index b3320b77..84c86529 100644 --- a/photonix/photos/schema.py +++ b/photonix/photos/schema.py @@ -338,8 +338,8 @@ def resolve_all_person_tags(self, info, **kwargs): photos_list = filter_photos_queryset( filters, Photo.objects.filter(library__users__user=user), kwargs.get('library_id')) - return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='P', photo_tags__photo__in=photos_list).distinct() - return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='P') + return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__photo__in=photos_list).distinct() + return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F') def resolve_all_color_tags(self, info, **kwargs): user = info.context.user diff --git a/requirements.txt b/requirements.txt index be6de12c..ca29e2a6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -3,10 +3,11 @@ scipy==1.4.1 matplotlib==3.1.2 tensorflow==2.4.1 opencv-python>=3.4.4 +annoy==1.17.0 -Django==3.0.14 +Django==3.2.3 django-cors-headers==3.2.1 -django-filter==2.2.0 +django-filter==2.4.0 PyJWT==1.7.1 django-graphql-jwt==0.3.0 diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index 32168c52..1e695e30 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -9,6 +9,7 @@ const Container = styled('div')` .FeatureBox { border: 3px solid rgba(255, 0, 0, 0.75); position: absolute; + border-radius: 6px; .FeatureLabel { color: #fff; font-size: 14px; @@ -16,16 +17,24 @@ const Container = styled('div')` display: inline-block; overflow: hidden; max-width: 100%; - padding: 0 5px 2px 2px; + padding: 0 7px 2px 4px; float: left; text-align: left; white-space: nowrap; + pointer-events: all; + &:hover { + overflow: visible; + text-shadow: 0 0 2px #f00; + } } &.face { border-color: rgba(255, 255, 0, 0.75); .FeatureLabel { color: #000; background-color: rgba(255, 255, 0, 0.75); + &:hover { + text-shadow: 0 0 2px #ff0; + } } } } diff --git a/ui/src/components/SearchInput.js b/ui/src/components/SearchInput.js index 656a7b83..0a907c91 100644 --- a/ui/src/components/SearchInput.js +++ b/ui/src/components/SearchInput.js @@ -7,6 +7,7 @@ import { ReactComponent as ObjectsIcon } from '../static/images/label.svg' import { ReactComponent as LocationsIcon } from '../static/images/location_on.svg' import { ReactComponent as ColorsIcon } from '../static/images/color_lens.svg' import { ReactComponent as StylesIcon } from '../static/images/style.svg' +import { ReactComponent as PeopleIcon } from '../static/images/person.svg' import { ReactComponent as CamerasIcon } from '../static/images/photo_camera.svg' import { ReactComponent as StarIcon } from '../static/images/star_outline.svg' @@ -14,6 +15,7 @@ const GROUP_ICONS = { 'Generic Tags': ObjectsIcon, Objects: ObjectsIcon, Locations: LocationsIcon, + People: PeopleIcon, Colors: ColorsIcon, Styles: StylesIcon, Cameras: CamerasIcon, @@ -40,7 +42,11 @@ const SearchInput = ({
      {selectedFilters.map((filter) => { - let icon = React.createElement(GROUP_ICONS[filter.group], { + let icon = ObjectsIcon + if (GROUP_ICONS[filter.group]) { + icon = GROUP_ICONS[filter.group] + } + icon = React.createElement(icon, { className: 'groupIcon', alt: filter.group, }) diff --git a/ui/src/components/ZoomableImage.js b/ui/src/components/ZoomableImage.js index 504635f9..3efd8ed3 100644 --- a/ui/src/components/ZoomableImage.js +++ b/ui/src/components/ZoomableImage.js @@ -171,6 +171,7 @@ const ZoomableImage = ({ photoId, boxes, next, prev }) => { diff --git a/ui/src/containers/FiltersContainer.js b/ui/src/containers/FiltersContainer.js index f2adade7..b2f5944e 100644 --- a/ui/src/containers/FiltersContainer.js +++ b/ui/src/containers/FiltersContainer.js @@ -129,10 +129,6 @@ const FiltersContainer = ({ const locationsTags = getFilterdData('Locations', data.allLocationTags) filterData.push(createFilterSelection('Locations', locationsTags)) } - if (data.allPersonTags.length) { - const peopleTags = getFilterdData('People', data.allPersonTags) - filterData.push(createFilterSelection('People', peopleTags)) - } if (data.allColorTags.length) { const colorsTags = getFilterdData('Colors', data.allColorTags) filterData.push(createFilterSelection('Colors', colorsTags)) @@ -141,6 +137,10 @@ const FiltersContainer = ({ const stylesTags = getFilterdData('Styles', data.allStyleTags) filterData.push(createFilterSelection('Styles', stylesTags)) } + if (data.allPersonTags.length) { + const peopleTags = getFilterdData('People', data.allPersonTags) + filterData.push(createFilterSelection('People', peopleTags)) + } if (data.allCameras.length) { filterData.push({ name: 'Cameras', diff --git a/ui/src/static/images/person.svg b/ui/src/static/images/person.svg new file mode 100644 index 00000000..ce0341d5 --- /dev/null +++ b/ui/src/static/images/person.svg @@ -0,0 +1 @@ + \ No newline at end of file From 8e5c7687182abd46940ec8361b2198fb7dff50b1 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Mon, 31 May 2021 00:34:00 +0100 Subject: [PATCH 034/110] Annoy index generation and cron setup --- docker/Dockerfile.dev | 7 ++- docker/Dockerfile.prd | 4 ++ photonix/classifiers/face/model.py | 19 ++++++- .../commands/retrain_face_similarity_index.py | 52 +++++++++++++++++++ .../migrations/0010_alter_photo_flash.py | 18 +++++++ photonix/photos/models.py | 2 +- system/cron.d/retrain_face_similarity_index | 1 + system/supervisord.conf | 9 ++++ 8 files changed, 108 insertions(+), 4 deletions(-) create mode 100644 photonix/photos/management/commands/retrain_face_similarity_index.py create mode 100644 photonix/photos/migrations/0010_alter_photo_flash.py create mode 100644 system/cron.d/retrain_face_similarity_index diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev index eac2290e..c04c1570 100644 --- a/docker/Dockerfile.dev +++ b/docker/Dockerfile.dev @@ -2,8 +2,9 @@ FROM python:3.8.9-slim-buster # Install system dependencies - note that some of these are only used on non-amd64 where Python packages have to be compiled from source RUN apt-get update && \ - apt-get install -y \ + apt-get install -y --no-install-recommends \ build-essential \ + cron \ curl \ dcraw \ git \ @@ -75,6 +76,10 @@ COPY ui/src /srv/ui/src COPY system /srv/system COPY system/supervisord.conf /etc/supervisord.conf +# Copy crontab +COPY system/cron.d /etc/cron.d/ +RUN chmod 0644 /etc/cron.d/* + ENV PYTHONPATH /srv CMD ./system/run.sh diff --git a/docker/Dockerfile.prd b/docker/Dockerfile.prd index 80e641e3..b81f6684 100644 --- 
a/docker/Dockerfile.prd +++ b/docker/Dockerfile.prd @@ -128,6 +128,10 @@ COPY ui/public /srv/ui/public COPY system /srv/system COPY system/supervisord.conf /etc/supervisord.conf +# Copy crontab +COPY system/cron.d /etc/cron.d/ +RUN chmod 0644 /etc/cron.d/* + ENV PYTHONPATH /srv RUN DJANGO_SECRET_KEY=test python photonix/manage.py collectstatic --noinput --link diff --git a/photonix/classifiers/face/model.py b/photonix/classifiers/face/model.py index 1cc9042f..41035e63 100644 --- a/photonix/classifiers/face/model.py +++ b/photonix/classifiers/face/model.py @@ -4,6 +4,7 @@ from pathlib import Path from random import randint +from annoy import AnnoyIndex import numpy as np from PIL import Image import redis @@ -16,6 +17,7 @@ GRAPH_FILE = os.path.join('face', 'mtcnn_weights.npy') +DISTANCE_THRESHOLD = 14 class FaceDetectionModel(BaseModel): @@ -51,6 +53,19 @@ def predict(self, image_file, min_score=0.99): def find_closest_face_tag(library_id, source_embedding): + # Use ANN index to do quick serach if it has been trained + from django.conf import settings + ann_path = Path(settings.MODEL_DIR) / 'face' / 'faces.ann' + tag_ids_path = Path(settings.MODEL_DIR) / 'face' / 'faces_tag_ids.json' + if os.path.exists(ann_path) and os.path.exists(tag_ids_path): + embedding_size = 128 # FaceNet output size + t = AnnoyIndex(embedding_size, 'euclidean') + t.load(str(ann_path)) + with open(tag_ids_path) as f: + tag_ids = json.loads(f.read()) + nearest = t.get_nns_by_vector(source_embedding, 1, include_distances=True) + return tag_ids[nearest[0][0]], nearest[1][0] + # Collect all previously generated embeddings from photonix.photos.models import PhotoTag representations = [] @@ -117,11 +132,11 @@ def run_on_photo(photo_id): from photonix.photos.models import Tag, PhotoTag photo.clear_tags(source='C', type='F') for result in results: - if result.get('closest_distance', 999) < 14: + if result.get('closest_distance', 999) < DISTANCE_THRESHOLD: tag = Tag.objects.get(id=result['closest_tag'], library=photo.library, type='F') print(f'MATCHED {tag.name}') else: - tag = get_or_create_tag(library=photo.library, name=f'Unknown person {randint(0, 999999):06f}', type='F', source='C') + tag = get_or_create_tag(library=photo.library, name=f'Unknown person {randint(0, 999999):06d}', type='F', source='C') x = (result['box'][0] + (result['box'][2] / 2)) / photo.base_file.width y = (result['box'][1] + (result['box'][3] / 2)) / photo.base_file.height width = result['box'][2] / photo.base_file.width diff --git a/photonix/photos/management/commands/retrain_face_similarity_index.py b/photonix/photos/management/commands/retrain_face_similarity_index.py new file mode 100644 index 00000000..6e1f1b8f --- /dev/null +++ b/photonix/photos/management/commands/retrain_face_similarity_index.py @@ -0,0 +1,52 @@ +import json +import os +from pathlib import Path + +from annoy import AnnoyIndex +from django.conf import settings +from django.contrib.auth import get_user_model +from django.core.management.base import BaseCommand +from django.db.utils import IntegrityError + +from photonix.photos.models import PhotoTag +from photonix.photos.utils.db import record_photo +from photonix.photos.utils.fs import determine_destination, download_file + + +class Command(BaseCommand): + help = 'Creates Approximate Nearest Neighbour (ANN) search index for quickly finding closest face without having to compare one-by-one.' 
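The new branch in find_closest_face_tag above swaps the linear scan over PhotoTag rows for an Annoy (Approximate Nearest Neighbours) lookup whenever a prebuilt index exists on disk. Annoy only addresses items by consecutive integer ids, hence the parallel faces_tag_ids.json list that maps each row back to a tag UUID, as built by the retrain_face_similarity_model method that follows. A minimal round trip illustrating the convention (stored_embeddings and query_embedding are hypothetical placeholders):

    import json
    from annoy import AnnoyIndex

    embedding_size = 128                      # FaceNet output size
    index = AnnoyIndex(embedding_size, 'euclidean')
    tag_ids = []
    for i, (tag_uuid, embedding) in enumerate(stored_embeddings):
        index.add_item(i, embedding)          # Annoy ids must be integers...
        tag_ids.append(tag_uuid)              # ...so remember each row's UUID
    index.build(3)                            # 3 trees, as in the management command
    index.save('faces.ann')
    with open('faces_tag_ids.json', 'w') as f:
        f.write(json.dumps(tag_ids))

    # Query side, mirroring find_closest_face_tag():
    ids, distances = index.get_nns_by_vector(query_embedding, 1,
                                             include_distances=True)
    closest_tag, closest_distance = tag_ids[ids[0]], distances[0]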
+
+    def retrain_face_similarity_model(self):
+        ann_path = Path(settings.MODEL_DIR) / 'face' / 'faces.ann'
+        tag_ids_path = Path(settings.MODEL_DIR) / 'face' / 'faces_tag_ids.json'
+
+        if PhotoTag.objects.filter(tag__type='F').count() < 10:
+            print('Not enough face tags to warrant building ANN index')
+            try:
+                os.remove(ann_path)
+                os.remove(tag_ids_path)
+            except:
+                pass
+            exit(0)
+
+        embedding_size = 128 # FaceNet output size
+        t = AnnoyIndex(embedding_size, 'euclidean')
+        tag_ids = []
+        for photo_tag in PhotoTag.objects.filter(tag__type='F'):
+            extra_data = json.loads(photo_tag.extra_data)
+            embedding = extra_data['facenet_embedding']
+            t.add_item(len(tag_ids), embedding)
+            tag_ids.append(str(photo_tag.tag.id))
+
+        # Build the ANN index
+        t.build(3) # Number of random forest trees
+        t.save(str(ann_path))
+
+        # Save Tag IDs to JSON file as Annoy only supports integer IDs so we have to do the mapping ourselves
+        with open(tag_ids_path, 'w') as f:
+            f.write(json.dumps(tag_ids))
+
+
+    def handle(self, *args, **options):
+        self.retrain_face_similarity_model()
diff --git a/photonix/photos/migrations/0010_alter_photo_flash.py b/photonix/photos/migrations/0010_alter_photo_flash.py
new file mode 100644
index 00000000..02bd7b2f
--- /dev/null
+++ b/photonix/photos/migrations/0010_alter_photo_flash.py
@@ -0,0 +1,18 @@
+# Generated by Django 3.2.3 on 2021-05-30 21:47
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('photos', '0009_auto_20210529_1244'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='photo',
+            name='flash',
+            field=models.BooleanField(null=True),
+        ),
+    ]
diff --git a/photonix/photos/models.py b/photonix/photos/models.py
index 68b71501..19e30e20 100644
--- a/photonix/photos/models.py
+++ b/photonix/photos/models.py
@@ -121,7 +121,7 @@ class Photo(UUIDModel, VersionedModel):
     exposure = models.CharField(max_length=8, blank=True, null=True)
     iso_speed = models.PositiveIntegerField(null=True)
     focal_length = models.DecimalField(max_digits=4, decimal_places=1, null=True)
-    flash = models.NullBooleanField()
+    flash = models.BooleanField(null=True)
     metering_mode = models.CharField(max_length=32, null=True)
     drive_mode = models.CharField(max_length=32, null=True)
     shooting_mode = models.CharField(max_length=32, null=True)
diff --git a/system/cron.d/retrain_face_similarity_index b/system/cron.d/retrain_face_similarity_index
new file mode 100644
index 00000000..3e6877ac
--- /dev/null
+++ b/system/cron.d/retrain_face_similarity_index
@@ -0,0 +1 @@
+*/5 * * * * root exec /bin/bash -c ". /run/supervisord.env; python /srv/photonix/manage.py retrain_face_similarity_index"
diff --git a/system/supervisord.conf b/system/supervisord.conf
index bde0d7e0..64c85324 100644
--- a/system/supervisord.conf
+++ b/system/supervisord.conf
@@ -29,6 +29,15 @@ stdout_logfile=/dev/stdout
 stderr_logfile_maxbytes=0
 stdout_logfile_maxbytes=0
 
+[program:cron]
+command = /bin/bash -c "declare -p | grep -Ev '^declare -[[:alpha:]]*r' > /run/supervisord.env && /usr/sbin/cron -f -L 15"
+stderr_logfile=/dev/stderr
+stdout_logfile=/dev/stdout
+stderr_logfile_maxbytes=0
+stdout_logfile_maxbytes=0
+autorestart = true
+priority = 20
+
 [program:webpack]
 command=/srv/system/run_webpack_server.sh
 startsecs=0
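NOTE: The two sides of the ANN feature in the patch above (index building in retrain_face_similarity_model, the fast path in find_closest_face_tag) boil down to a small Annoy round-trip. The following is an illustrative sketch, not part of the patch series: the 128-dimension FaceNet size, the 'euclidean' metric and the tag-ID mapping mirror the patch, while the random vectors and tag IDs are hypothetical stand-ins for real PhotoTag data.

    # Illustrative sketch of the Annoy build/save/load/query round-trip.
    import json
    import random

    from annoy import AnnoyIndex

    EMBEDDING_SIZE = 128  # FaceNet output size, as in the patch

    # Build side: Annoy items are dense integers, so a parallel list maps each
    # item ID back to the Tag UUID it represents.
    index = AnnoyIndex(EMBEDDING_SIZE, 'euclidean')
    tag_ids = []
    for tag_id in ['tag-uuid-1', 'tag-uuid-2', 'tag-uuid-3']:  # hypothetical IDs
        index.add_item(len(tag_ids), [random.random() for _ in range(EMBEDDING_SIZE)])
        tag_ids.append(tag_id)
    index.build(3)  # number of trees: more trees means better recall, slower build
    index.save('faces.ann')
    with open('faces_tag_ids.json', 'w') as f:
        f.write(json.dumps(tag_ids))

    # Query side: load the index and mapping, then ask for the single nearest
    # neighbour. With include_distances=True Annoy returns ([ids], [distances]).
    t = AnnoyIndex(EMBEDDING_SIZE, 'euclidean')
    t.load('faces.ann')
    with open('faces_tag_ids.json') as f:
        tag_ids = json.loads(f.read())
    source_embedding = [random.random() for _ in range(EMBEDDING_SIZE)]
    nearest = t.get_nns_by_vector(source_embedding, 1, include_distances=True)
    print(tag_ids[nearest[0][0]], nearest[1][0])  # closest Tag ID and its distance

The query cost is roughly logarithmic in the number of items, which is why the patch prefers this path over comparing the source embedding against every stored PhotoTag embedding one by one.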
From fb8e2b68611e8db3c599d0e12d48226eabf6d556 Mon Sep 17 00:00:00 2001
From: Damian Moore
Date: Mon, 31 May 2021 11:20:38 +0100
Subject: [PATCH 035/110] Locking during face model retraining and storing model versions against PhotoTag

---
 docker/Dockerfile.dev                              |  1 +
 docker/Dockerfile.prd                              |  4 +-
 photonix/classifiers/face/model.py                 | 28 +++++++++--
 .../classification_color_processor.py              |  1 -
 .../classification_location_processor.py           |  1 -
 .../classification_object_processor.py             |  1 -
 .../classification_style_processor.py              |  1 -
 .../commands/retrain_face_similarity_index.py      | 47 ++++++++++++++-----
 8 files changed, 62 insertions(+), 22 deletions(-)

diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev
index c04c1570..1dac66ec 100644
--- a/docker/Dockerfile.dev
+++ b/docker/Dockerfile.dev
@@ -16,6 +16,7 @@ RUN apt-get update && \
     libfreetype6 \
     libfreetype6-dev \
     libgl1 \
+    libglib2.0-dev \
     libhdf5-dev \
     libimage-exiftool-perl \
     libjpeg-dev \
diff --git a/docker/Dockerfile.prd b/docker/Dockerfile.prd
index b81f6684..25fe0948 100644
--- a/docker/Dockerfile.prd
+++ b/docker/Dockerfile.prd
@@ -94,11 +94,13 @@ RUN rm -rf \
 FROM ${ARCH}python:3.8.9-slim-buster
 
 RUN apt-get update && \
-    apt-get install -y \
+    apt-get install -y --no-install-recommends \
+    cron \
     dcraw \
     libatlas3-base \
     libfreetype6 \
     libfreetype6-dev \
+    libglib2.0-dev \
     libimage-exiftool-perl \
     libpq-dev \
     libtiff5-dev \
diff --git a/photonix/classifiers/face/model.py b/photonix/classifiers/face/model.py
index 41035e63..bb441e05 100644
--- a/photonix/classifiers/face/model.py
+++ b/photonix/classifiers/face/model.py
@@ -1,3 +1,4 @@
+from datetime import datetime
 import json
 import os
 import sys
@@ -5,6 +6,7 @@
 from random import randint
 
 from annoy import AnnoyIndex
+from django.utils import timezone
 import numpy as np
 from PIL import Image
 import redis
@@ -53,16 +55,20 @@ def predict(self, image_file, min_score=0.99):
 
 
 def find_closest_face_tag(library_id, source_embedding):
-    # Use ANN index to do quick search if it has been trained
+    # Use ANN index to do quick search if it has been trained by retrain_face_similarity_index
     from django.conf import settings
     ann_path = Path(settings.MODEL_DIR) / 'face' / 'faces.ann'
     tag_ids_path = Path(settings.MODEL_DIR) / 'face' / 'faces_tag_ids.json'
+
     if os.path.exists(ann_path) and os.path.exists(tag_ids_path):
         embedding_size = 128 # FaceNet output size
         t = AnnoyIndex(embedding_size, 'euclidean')
-        t.load(str(ann_path))
-        with open(tag_ids_path) as f:
-            tag_ids = json.loads(f.read())
+        # Ensure ANN index, tag IDs and version files can't be updated while we are reading
+        r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1'))
+        with Lock(r, 'face_model_retrain'):
+            t.load(str(ann_path))
+            with open(tag_ids_path) as f:
+                tag_ids = json.loads(f.read())
         nearest = t.get_nns_by_vector(source_embedding, 1, include_distances=True)
         return tag_ids[nearest[0][0]], nearest[1][0]
 
@@ -89,6 +95,18 @@ def find_closest_face_tag(library_id, source_embedding):
     return (representations[candidate_idx][0], distance)
 
 
+def get_retrained_model_version():
+    from django.conf import settings
+    version_file = Path(settings.MODEL_DIR) / 'face' / 'retrained_version.txt'
+    version_date = None
+    if os.path.exists(version_file):
+        with open(version_file) as f:
+            contents = f.read().strip()
+            version_date = datetime.strptime(contents, '%Y%m%d%H%M%S').replace(tzinfo=timezone.utc)
+        return int(version_date.strftime('%Y%m%d%H%M%S'))
+    return 0
+
+
 def run_on_photo(photo_id):
     model = FaceDetectionModel()
     sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
@@ -147,7 +165,7 @@ def run_on_photo(photo_id):
 
             if 'embedding' in result:
                 extra_data = json.dumps({'facenet_embedding': result['embedding']})
-            PhotoTag(photo=photo, tag=tag, source='F', confidence=score, significance=score, position_x=x, position_y=y, size_x=width, size_y=height, extra_data=extra_data).save()
+            PhotoTag(photo=photo, tag=tag, source='F', confidence=score, significance=score, position_x=x, position_y=y, size_x=width, size_y=height, model_version=model.version, retrained_model_version=get_retrained_model_version(), extra_data=extra_data).save()
         photo.classifier_color_completed_at = timezone.now()
         photo.classifier_color_version = getattr(model, 'version', 0)
         photo.save()
diff --git a/photonix/photos/management/commands/classification_color_processor.py b/photonix/photos/management/commands/classification_color_processor.py
index eca7419b..bebd5515 100644
--- a/photonix/photos/management/commands/classification_color_processor.py
+++ b/photonix/photos/management/commands/classification_color_processor.py
@@ -1,7 +1,6 @@
 from django.core.management.base import BaseCommand
 # Pre-load the model graphs so it doesn't have to be done for each job
 from photonix.classifiers.color import ColorModel, run_on_photo
-from photonix.photos.models import Task
 from photonix.photos.utils.classification import ThreadedQueueProcessor
 
diff --git a/photonix/photos/management/commands/classification_location_processor.py b/photonix/photos/management/commands/classification_location_processor.py
index 7a31c8c7..feae17f4 100644
--- a/photonix/photos/management/commands/classification_location_processor.py
+++ b/photonix/photos/management/commands/classification_location_processor.py
@@ -1,7 +1,6 @@
 from django.core.management.base import BaseCommand
 # Pre-load the model graphs so it doesn't have to be done for each job
 from photonix.classifiers.location import LocationModel, run_on_photo
-from photonix.photos.models import Task
 from photonix.photos.utils.classification import ThreadedQueueProcessor
 
diff --git a/photonix/photos/management/commands/classification_object_processor.py b/photonix/photos/management/commands/classification_object_processor.py
index 6c813de0..fa78a804 100644
--- a/photonix/photos/management/commands/classification_object_processor.py
+++ b/photonix/photos/management/commands/classification_object_processor.py
@@ -1,7 +1,6 @@
 from django.core.management.base import BaseCommand
 # Pre-load the model graphs so it doesn't have to be done for each job
 from photonix.classifiers.object import ObjectModel, run_on_photo
-from photonix.photos.models import Task
 from photonix.photos.utils.classification import ThreadedQueueProcessor
 
diff --git a/photonix/photos/management/commands/classification_style_processor.py b/photonix/photos/management/commands/classification_style_processor.py
index af837d82..046a29e1 100644
--- a/photonix/photos/management/commands/classification_style_processor.py
+++ b/photonix/photos/management/commands/classification_style_processor.py
@@ -1,7 +1,6 @@
 from django.core.management.base import BaseCommand
 # Pre-load the model graphs so it doesn't have to be done for each job
 from photonix.classifiers.style import StyleModel, run_on_photo
-from photonix.photos.models import Task
 from photonix.photos.utils.classification import ThreadedQueueProcessor
 
diff --git a/photonix/photos/management/commands/retrain_face_similarity_index.py b/photonix/photos/management/commands/retrain_face_similarity_index.py
index 6e1f1b8f..431435ee 100644
--- a/photonix/photos/management/commands/retrain_face_similarity_index.py
+++ b/photonix/photos/management/commands/retrain_face_similarity_index.py
@@ -1,33 +1,47 @@
+from datetime import datetime
 import json
 import os
 from pathlib import Path
+from time import time
 
 from annoy import AnnoyIndex
 from django.conf import settings
-from django.contrib.auth import get_user_model
 from django.core.management.base import BaseCommand
-from django.db.utils import IntegrityError
+from django.utils import timezone
+import redis
+from redis_lock import Lock
 
 from photonix.photos.models import PhotoTag
-from photonix.photos.utils.db import record_photo
-from photonix.photos.utils.fs import determine_destination, download_file
 
 
 class Command(BaseCommand):
     help = 'Creates Approximate Nearest Neighbour (ANN) search index for quickly finding closest face without having to compare one-by-one.'
 
-    def retrain_face_similarity_model(self):
+    def retrain_face_similarity_index(self):
         ann_path = Path(settings.MODEL_DIR) / 'face' / 'faces.ann'
         tag_ids_path = Path(settings.MODEL_DIR) / 'face' / 'faces_tag_ids.json'
+        version_file = Path(settings.MODEL_DIR) / 'face' / 'retrained_version.txt'
+        version_date = None
+
+        if os.path.exists(version_file):
+            with open(version_file) as f:
+                contents = f.read().strip()
+                version_date = datetime.strptime(contents, '%Y%m%d%H%M%S').replace(tzinfo=timezone.utc)
+
+        if version_date and PhotoTag.objects.filter(updated_at__gt=version_date).count() == 0:
+            print('No new PhotoTags so no point in updating face ANN index')
+            return
+
+        start = time()
 
         if PhotoTag.objects.filter(tag__type='F').count() < 10:
-            print('Not enough face tags to warrant building ANN index')
+            print('Not enough face tags to warrant building face ANN index')
             try:
                 os.remove(ann_path)
                 os.remove(tag_ids_path)
             except:
                 pass
-            exit(0)
+            return
 
         embedding_size = 128 # FaceNet output size
         t = AnnoyIndex(embedding_size, 'euclidean')
@@ -40,13 +54,22 @@ def retrain_face_similarity_model(self):
 
         # Build the ANN index
         t.build(3) # Number of random forest trees
-        t.save(str(ann_path))
 
-        # Save Tag IDs to JSON file as Annoy only supports integer IDs so we have to do the mapping ourselves
-        with open(tag_ids_path, 'w') as f:
-            f.write(json.dumps(tag_ids))
+        # Acquire lock to save ANN, tag IDs and version files atomically
+        r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1'))
+        with Lock(r, 'face_model_retrain'):
+            # Save ANN index
+            t.save(str(ann_path))
+
+            # Save Tag IDs to JSON file as Annoy only supports integer IDs so we have to do the mapping ourselves
+            with open(tag_ids_path, 'w') as f:
+                f.write(json.dumps(tag_ids))
+
+            # Save version of retrained model to text file - used to save against the PhotoTag model and to determine whether retraining is required
+            with open(version_file, 'w') as f:
+                f.write(datetime.utcnow().strftime('%Y%m%d%H%M%S'))
+        print(f'Face ANN index updated in {(time() - start):.3f}s')
 
     def handle(self, *args, **options):
-        self.retrain_face_similarity_model()
+        self.retrain_face_similarity_index()
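NOTE: The locking in the patch above enforces one invariant: faces.ann, faces_tag_ids.json and retrained_version.txt must always be read as a matching set. The following is an illustrative sketch of that reader/writer pattern, not part of the patch series; it assumes the same REDIS_HOST convention as the patch, and the file contents are placeholder stand-ins for the real index, JSON mapping and version stamp.

    # Illustrative sketch of the 'face_model_retrain' lock coordination.
    import os

    import redis
    from redis_lock import Lock

    r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1'))

    # Writer (the retrain_face_similarity_index cron job): all three files are
    # replaced while the lock is held, so readers always see a matching set.
    with Lock(r, 'face_model_retrain'):
        for name in ('faces.ann', 'faces_tag_ids.json', 'retrained_version.txt'):
            with open(name, 'w') as f:
                f.write('placeholder')  # stands in for t.save() / JSON dump / version stamp

    # Reader (find_closest_face_tag in a classifier worker): taking the same
    # named lock blocks until any in-progress save has finished.
    with Lock(r, 'face_model_retrain'):
        with open('faces_tag_ids.json') as f:
            data = f.read()  # guaranteed to match the faces.ann saved alongside it

Because both processes name the same lock in the same Redis instance, the cron job and the long-running workers coordinate without sharing any in-process state.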
From a83752264af60c98a0590402982d13a210c23998 Mon Sep 17 00:00:00 2001
From: Damian Moore
Date: Wed, 2 Jun 2021 01:14:29 +0100
Subject: [PATCH 036/110] Fixes face matching bug - ANN and brute force distances now match

---
 photonix/classifiers/face/model.py                            | 4 ++--
 ...tection_processor.py => classification_face_processor.py} | 5 ++---
 system/supervisord.conf                                       | 2 +-
 3 files changed, 5 insertions(+), 6 deletions(-)
 rename photonix/photos/management/commands/{classification_face_detection_processor.py => classification_face_processor.py} (81%)

diff --git a/photonix/classifiers/face/model.py b/photonix/classifiers/face/model.py
index bb441e05..dd9117c7 100644
--- a/photonix/classifiers/face/model.py
+++ b/photonix/classifiers/face/model.py
@@ -19,7 +19,7 @@
 
 GRAPH_FILE = os.path.join('face', 'mtcnn_weights.npy')
-DISTANCE_THRESHOLD = 14
+DISTANCE_THRESHOLD = 10
 
 
 class FaceDetectionModel(BaseModel):
@@ -92,7 +92,7 @@ def find_closest_face_tag(library_id, source_embedding):
     if not distances: # First face has nothing to compare to
         return (None, 999)
     candidate_idx = np.argmin(distances)
-    return (representations[candidate_idx][0], distance)
+    return (representations[candidate_idx][0], distances[candidate_idx])
diff --git a/photonix/photos/management/commands/classification_face_detection_processor.py b/photonix/photos/management/commands/classification_face_processor.py
similarity index 81%
rename from photonix/photos/management/commands/classification_face_detection_processor.py
rename to photonix/photos/management/commands/classification_face_processor.py
index 28dcc397..04364888 100644
--- a/photonix/photos/management/commands/classification_face_detection_processor.py
+++ b/photonix/photos/management/commands/classification_face_processor.py
@@ -1,16 +1,15 @@
 from django.core.management.base import BaseCommand
 # Pre-load the model graphs so it doesn't have to be done for each job
 from photonix.classifiers.face import FaceDetectionModel, run_on_photo
-from photonix.photos.models import Task
 from photonix.photos.utils.classification import ThreadedQueueProcessor
 
 
-print('Loading face detection model')
+print('Loading face model')
 model = FaceDetectionModel()
 
 
 class Command(BaseCommand):
-    help = 'Runs the workers with the face detection model.'
+    help = 'Runs the workers with the face detection and recognition model.'
 
     def run_processors(self):
         num_workers = 1
diff --git a/system/supervisord.conf b/system/supervisord.conf
index 64c85324..bfa3df04 100644
--- a/system/supervisord.conf
+++ b/system/supervisord.conf
@@ -138,7 +138,7 @@ stderr_logfile_maxbytes=0
 stdout_logfile_maxbytes=0
 
 [program:classification_face_detection_processor]
-command=bash -c "sleep 13 && nice -n 19 python /srv/photonix/manage.py classification_face_detection_processor"
+command=bash -c "sleep 13 && nice -n 19 python /srv/photonix/manage.py classification_face_processor"
 startsecs=23
 environment=PYTHONPATH=/srv
 stderr_logfile=/dev/stderr
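NOTE: The one-line fix in patch 036 is easy to misread, so here is an illustrative toy example of the brute-force path, not part of the patch series. It assumes Euclidean distance for the brute-force comparison, which the 'euclidean' Annoy metric and the commit message imply; the 3-dimensional embeddings are stand-ins for real 128-dimension FaceNet vectors.

    # Illustrative sketch: pairing the chosen candidate with its own distance.
    import numpy as np

    # (tag_id, embedding) pairs, as collected from previously tagged faces
    representations = [('tag-a', [0.0, 0.0, 0.0]), ('tag-b', [1.0, 1.0, 1.0])]
    source_embedding = np.array([0.9, 1.0, 1.1])

    distances = [np.linalg.norm(np.array(emb) - source_embedding)
                 for _, emb in representations]
    candidate_idx = np.argmin(distances)

    # The fix: return the distance belonging to the chosen candidate. Returning
    # a leftover loop variable (`distance`) paired the right tag with the wrong
    # number, so brute-force results disagreed with the ANN path and made
    # DISTANCE_THRESHOLD mean different things on the two paths.
    print(representations[candidate_idx][0], distances[candidate_idx])
    # -> tag-b 0.1414..., well under DISTANCE_THRESHOLD = 10, i.e. a match

With both paths returning true Euclidean distances, the threshold could also be tightened from 14 to 10 in the same patch.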
From aff315bd4d5cc7b228765a059e2de9a5caa566a3 Mon Sep 17 00:00:00 2001
From: Damian Moore
Date: Wed, 2 Jun 2021 11:23:26 +0100
Subject: [PATCH 037/110] First attempt at face test cases

---
 tests/photos/faces/Barbara_Becker_0001.jpg | Bin 0 -> 15996 bytes
 tests/photos/faces/Boris_Becker_0003.jpg   | Bin 0 -> 16291 bytes
 tests/photos/faces/Boris_Becker_0004.jpg   | Bin 0 -> 14336 bytes
 tests/photos/faces/Boris_Becker_0005.jpg   | Bin 0 -> 15020 bytes
 tests/photos/faces/David_Beckham_0001.jpg  | Bin 0 -> 15945 bytes
 tests/photos/faces/David_Beckham_0002.jpg  | Bin 0 -> 11721 bytes
 tests/photos/faces/David_Beckham_0010.jpg  | Bin 0 -> 12052 bytes
 tests/test_classifier_faces.py             | 51 +++++++++++++++++++++
 8 files changed, 51 insertions(+)
 create mode 100644 tests/photos/faces/Barbara_Becker_0001.jpg
 create mode 100644 tests/photos/faces/Boris_Becker_0003.jpg
 create mode 100644 tests/photos/faces/Boris_Becker_0004.jpg
 create mode 100644 tests/photos/faces/Boris_Becker_0005.jpg
 create mode 100644 tests/photos/faces/David_Beckham_0001.jpg
 create mode 100644 tests/photos/faces/David_Beckham_0002.jpg
 create mode 100644 tests/photos/faces/David_Beckham_0010.jpg
 create mode 100644 tests/test_classifier_faces.py

diff --git a/tests/photos/faces/Barbara_Becker_0001.jpg b/tests/photos/faces/Barbara_Becker_0001.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..9be1843486cf63fbbf64abb25324d681e033bf90
Binary files /dev/null and b/tests/photos/faces/Barbara_Becker_0001.jpg differ
diff --git a/tests/photos/faces/Boris_Becker_0003.jpg b/tests/photos/faces/Boris_Becker_0003.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..b0325d3e02cc7378df2d70348b0bb5c366844bb0
Binary files /dev/null and b/tests/photos/faces/Boris_Becker_0003.jpg differ
[GIT binary patch data for tests/photos/faces/Boris_Becker_0004.jpg not preserved in this excerpt]
diff --git a/tests/photos/faces/Boris_Becker_0005.jpg b/tests/photos/faces/Boris_Becker_0005.jpg
new file mode 100644
index 0000000000000000000000000000000000000000..5dca9e6409e5185bc956274bb35853a5af17362f
Binary files /dev/null and b/tests/photos/faces/Boris_Becker_0005.jpg differ
[GIT binary patch data for the David_Beckham JPEGs and the diff for tests/test_classifier_faces.py truncated in this excerpt]
R*}R=m&o7g(9Q`-{KLDsav8Mn4 literal 0 HcmV?d00001 diff --git a/tests/photos/faces/David_Beckham_0001.jpg b/tests/photos/faces/David_Beckham_0001.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4ec8891165a3dacb190ed7925d0dfc17887957c4 GIT binary patch literal 15945 zcmbVzWmFtN*XH03f#5a-h~U9p5`qT^Is^?a6Wl$40KpxC2iIY6cbDKYxH}9m=-|uy zeS3D#*tZ4FtPD*@$vC+@$d);NnQ{T5)j)8-LfkR4wM?m`jY)>5kVr;-I;1&&q5r9gJf<}z;)B^xM`-y?_Uk3P} zhJyO+BPJF$4ldqvh1wSYR1`EcRCKiec>P@4_xU~mofw1U754|smukjXjEoS4Q&3V-zkc)f9SbWTzkr~Su!!_08Cf}b1w{=_Ep3p_S6x#x za|=r=Ya3@5S2uSLPp`mVLBS!vL&M_Y6B3h>|D>emwg3LKX4I0 z<3dG8M?=T@4=xl`_vaIh7#-skHzvskH7sMtmyA3=u}MG1W>pu0=QHWP(HD`}%De&>CvrdMzdn0o4!t`efXO2aY1eCX(@DPef6(s_ zZBd*!wEu2gmo`*;p8QXkV~oL3Cn@uN*v^Kv9X6x6`G?1WjXGOC>@rt8UoQ)VY!_nI z0p5tzDT2_1)fP+b5GJYuWWJADv2?)**xr_wyZ?RE7BHTfHUdq3-;a5`mOxeG^kF2X zHg(pR?{zSVMgMD6hz$^DX%5+UrxiwNQP(IFsu|t*1M8y56&k{g3{+|fUT+)on!HK6 ze(x~gxVq2^UfMh~71OuAZFvIBomxAN@SR+x5I+F|u>!NoSzL?brJ5&m8mncSn-kW* zKI#ngVV6=12DnNR7+Eg_)r9)9>T*E3v;_?wYVeM&;~c&_M-lBO5s(!K3OcK3Ra z1;Zi%?rw{{W@d)uw+z(q7S80#y;HrFj}`h|sBk)Lx;^%3!$xscBM99H%czQqs+S_* zT}~Ll?p?1)7+~0la?k9({y=i=O`Z|?-fk9LNOJ8fa5A@np1w9^qftpHpN=dBZ8i`q z_)of=S-0e}#zO727mh}0lT7S}@9ReDUs!ujfCn2R_=A8-@>Tc2MkyUr_Y@cTmE|Hp zLvo#JoCcDSwO$wP!lBuc+8RjwnD+f;{XbsqO_xOJzRrX*+|;foz%Rk>(GPd*3@oG0 zlXVb|oSGoxAh(`&7b3+p!G@f45t7SXQumyk*}2jsZW&UvK4tznx3)YdiGOz&WgK>R zrFqzQKEaf%1hylrznCMpfX#4yU|5SgS-1jGCI=%ecL%3LO1YSfqRZw9&wlUVo*6ik zwsA7YnXhkS{>C8pm3S$pvfnI!wCXfD3kNXh&wFPYw_6Ye0&QbyfsbDQg>vzJ6rNa2 zH5FDH7BJuEVdLcYwXPFi40Ans72TZz!%#AV&2 zD*;%WCpkdc{z|7`ve`7Cd(QC&MyD9}rLrlnG5YVlu_<}8xjnm+7;j&Wd|f(f$0o4w z(9jaWc%pf_8gk6Z@t4PrZTChl;Rzr%yj54VD+AycEDTG-DzAdY;l06x= zHa1&yrNNK#hS7<{)u!Y|vx!B2Mn>7y4qN5Mcsh3M;(Kb;AZ}oFR&CrZZIeew3--L< z4;IB`*>;-8aOaJ{^&&~JKC$woCpGM7v`+vRcZf3YhU;kfhPTeA0=9g#2Wr=|PI!!} z?nSYZ=JBV=)N_fWN*Kp+wvq6CC?z4K>6_Y@+kk5(0F;UBd?gJri(?Tsr9Y|jp$qR% zb?l<>zF}hSn5(mmRQETL!Q685IbnDDOmeoGEL6<^kT_g3*ci)ZM!22zclk0TKC;L`8(ywo#xY*#xr^PEQx89lZq}awKbaPuR~FvNAvVa3WSMY!*rz292tS9m|u2BNBFLnKA~oQw=VOyIVrrxHjimda$)pDtpPd5 z&VHO9x)b^Z-9%UjHQavQbR*)bLVxk#^M=k$KzQ&I02gwnap16@RU-zf_AjZM8>u;y zl_T5tlX$3s9NqI47XEU4$R@RkW8Y-(Wh*~dQxOoS>^s%X^m_tSOZL&2e0Cp;all;? zL~78iz3m%z;P!Nwrvm8H`ztmB;SdDL%JgTKfyib7vA$oMLYGjrhm0qH6_m=y6rnn& zJGsnvXdIf4Ga!)wRde}Z1zEUTy#ewg=Oy0Uzi+7yH<;mK@a!KF+xV~r+9T=&H$vN} z_CPO(rWCK%GnTgEz}sAl)%tZaV&K2lG4E41mdvNkHknUTcI0?rlt{@q^7hWUId#`crQIl^_lxX-5ffbtw)8&H?K#1Sik9^ zu#)sjm%5s^gz=Qi7#Y%8vx%Nr&)a}=mBW_ZPXC{rlu*aLvS&ySft#|%7k6nsfdeN_ zPUa;g`c6>cu3_@Vvb*kS??q`lTp;4FvZeF64C9s9g}r06*LjACG7wZJX=cSY8*yx9 z`u)t?c{Y)GCl+GlnzD-J^Hn0PSk>OMX=7G>kg@WR>JXk;dudSzUmFY2*&B)f0{?D)})4K_e^M;TXoj3yy5sia>brV<^TTkEQr|;Kz(T? 
z=Ei=M^pIg!Cf0kWZ}2e~3tD;PTiWsW)HNsqnrkfxx2GAri)7O6o;|SP|YD&^9*k3T0)W+UZr9>l1)TMfTwc!bsoEv;&N^C>{~V(UvP2 zgIK*dsOlK=BrTE9q0)hl(`sj?ZFr9nEiTLD9dMZ?b8jces~Wa8@Sl%G`dh&jH;7&r zruKoPnrBFD{Wh7+&OamNHbtKRFAi2@mKM`r47)SEXxpVD+Dtt(oW#AR6RG+Y-z@PD zNmk~;B(>m5z_@*>pb&df@Z~Prkk?3I@9|y53?t7%H|t_K=JoG)x319S$Cs<~t2PuO z21hKf=asMOt(4d^H^ofn91Y0x9@&tQ8=*d;**5A{v)Tjg;8a?)w`#FBpwi&-l{keo z?XEEj3B^RdD98;^o-^BGiWRR&iPshvTXHtTxn9&UJ>AiWVaZ2gT7t^Q{xl=dBjn5U z|??6FB>M ze^ft_mNowF-6|ah$2&B8UB$(JH-(7zIbrY6h9nsLh9B{e;Tvp`dfHL$O))5qsqkEV=@xdI-VZ>tO$X7$9&DnT%k=g{%ty0b7CGSK&D>&+lUaO?ns)ZoaV_c+ zf)6Em6OS}VQTbEyP~5Z>T3&fgDgZq7Vr(APdF7AUM!0l%8-NXcYg8jDNDKObG#ov@ z3VF!3Xm9ve=b#)X$G{*)v~38vv+?<4MCjqv-I;3kLy1q%LQB$BY7M01WGi*3+&aJc z1i-%#g+{jn^JX}^N6gW8z7lC+6AjhgQrX)j#V`}Ulix%klas=tgknBKw^2l+d!Qf0 zl`q1T17qYEh@Joj-cNub1-2L9$F}00-j!a+muYqVW(ou{O;fDsW9zk%C?a}5xN6{TOjt(dv!o`gm zz8slg#F5y16BI4mF9r(X%dd25;AGao5c#f^iC7)uw_ty z+kGhubQ5nNbMN)zojTF!y-G2D=TH`i{mU_WvdXN&=!LbP!E(HcroA47M;%Dt&a!p~ z3V;dhLde&v#5O@lIp<=rNbw8DuWg2cvF@4vzs^KJfYqx0;7ie6w0<;|R?~lq(taV7 zP_yVx3JSL~JR&kf-R|b!u7AQr1S^@CL0KN{>yn|LM_%`MN&eG7=P9&q}~P>-Sgm1lSdGy*+Q&f zqv*+si<}JR=$IT6A#W=R=Jjk=2w&qJs?QOJ)HO6vC{R5CzHO&A`coWGB*%)(9jFho z5%jChFq(by)t03Keuk;?%a&d8UTg2x+l(V6`;{H@ZD23;^tfqBO#3g6ll8x#Z9g_9hLLw48tH-I z4wzSyGT^`{K^J2rEzJHj)Mtc2X$$X%ru=vzzT+JAoBA}v$=7vF^=HRss_zw}aGOn$ zjk^+;(WlNvXrs2fVNm1hl{yqAa$l*c{w0?IA3dJ*P>bMgM?ub9WA0D$J9sGn(U)MFCu6RN^ z>+2)JgXa6=HcxCXD903YPYtQc_}SP9)UjQCdDfk&{$PH@_lNP zMU~RFH?kd_hlWXx)o4F^eZ?uy3Yo$Mde;w2G#wyky{{q7)w|-3_s{JuDgu|P|Lc>7 z=L7R<_F9U)#YLoaWL>pH1589Q9~0s9Bt~iK67?Jp@BY@r9R=4@lB2P%U>Bh zZNLJ6uGp$fRYbWC-6FdQAl08KT>N8%$I#RAT}CJ6@_D`2 zDbep0#>UF3sM+3Z>n$qKazu2#7f~NxtI7h)NNxD?nyF2uKkE%lt8K@X+W*_H-<|P*0_yIh@=Q_cJhq(c8qy&se!~q+E2ax- zgKpQIRHr5@GT97_dQ`O99bc|X9jxkJW)WF^`B*XZVG!s(*?7QdKzzLZw}eAQ19Upt zUM73IQ2Q?}Six}=9NI8(ue#odQ#ZYfm-!`58Z;3!Im;AbIet+Fkrf4>#7;h%!rzvDjo+?d$q$v5X1A7H87z!t9C z2&&ub%&i!ODMy7Rc2^n*oYw{#K=NYoIJ<;A^W~`;+e=%eKh@`Ms5kqlBf_+;>Uo_r zQ?4N+MEy>q!}i6p7NHH=2NRk7`-V4pexr3`Ze~s4RAOsp)CZs|$4R%ESXP$BD2mj* zB*wRl!{!rNg2T|ZpYQLKn{1M#PCyaxq*pMSCg2d{!)E@pn)>9nDXtS!^LATWFxvvO z4Rt|RE-^^cIl}D;a0!#h!ZWUD=6Hy=_sB(>M>Q^6iI32uelZvmZJFVCn_QANU7NsTl@D}m*zz`n)Dhc;k`u_ zx$_6MEV%y8gyN()sH>gKRavy}78s;E?VE{E;E2)yWQv!rd_0e~KQ!X7y570FiaJH2 z;ap+584d}!LW_yJD#SJcxVQg!6N=O4>*)EcC$Ai<9osj?GT2p9?hFiL2b9q6XIr6N z=ry`Op8$nPz@Zk^?yRxtynP@NGC-y(>YbJEF?JNcX^(aCZ+^!WuDfBQ8wGJBj&x&w*KbvhX*5UiF&auFicvQ8n|RFdy!pAVpT+_F`dm&J zo|2uw`CLULf6ECz>6OWFXWmtwdbh`3CB&ez2#p=**;0F7Uip3tYTIg$% zH05O6bo(^Yg|4YDTobH>r22dbHLkF(VvIc_H@=QuHzL2+K`>0sU)(7KE-Sia6lUdn z_xDCqD4QZ=Et~8m#`SIlikr82oHNC}K!Xx2f-#42%gY@WiDo8-cG!nT!Unx#%i^iT zafXP<^|bzan8fIZB}niX#Fscne$^~;a1Rs1XZf^g!JY(J=N%AX%46wnYNRA%HI5NU z1aN_h)ZPL$0u*$&u0e;4rq)YrtP1X`sZ(*YQjLp04^^U-vCw1n3FL37ZJ5bno5%Ps z_fsqC%@tND`@d7yrGA!o2MAJExzx__2p68beEss>YLIjktCI2;*7YdATD~T$QPeFh z>!X$gNlq(!sjyJ`;^Ms0ASuT^Fo{wE51>SlJ}1kI4iv;#(*5H`(yMZ3dcKc?VAhrx zj=80u3I11`_KLRZ<zM3s^D%=jfg01GSfp)i5{sEbY1=0z*bbZF$(lI! 
zoSa3)t5h=E^?nm?5QtSm+q&^99q5_l!#;2~A}hOBa_SPwsU!k~o{7KjR20z{N z5De(gxYIyZhKRBwZo{sg0GMxF<{O^?Nyq-Wr|rQMYO9Zv5&{ijGKu7iC=*pgOs(Ga zfqn+e_1Ag+57;n6nzh*F9%2WGESD>`pX{OpCB6nd$8I?ba*LYH?)OcjUEfB8M$z9- zaXd0KV$roP$W44pDRf|_)M{}v)$IM(t|9*AW5s*3jzZ?(%Deu>Ym@^0{dx-B$OvtP zrlHyQInsTbDX${QU|FVqb7MDXFS)0k%!YXXI@KaSM>xP^p8TU`G3CW5_#C7?L7`kRdrZ!iXeY0Y z2FL{O*eIr@ZOP57RjjQ1+Ft0dcpRmEK~I-?rG>JUyuHyXsevGf`tgrbF&Pk_+Jlt) zwjkJj$i*#C^P9Opn?G=KF@eBQtQtdIc%U4&{7k$Ar;mj?-bZJ0yQ?$@VpxrctX&AC zE%|T_jl0*_ECq`e%B$~eTJOcC`?(I!`>4)x*-f$u6)?of{xd^C&fH<~JL_z0E6?4X z!3?70r{8y6mJXNnwF{g&j#J-Yb+nV#$~bWDx=Ss(5$Yi>ptj?eY!H=K60=NHYg-`p z%^@$B?SIjlzPeac=$2&K28wU6Vq`r1Ae(6USk{R#T&61x%FBtm=R#$6N&w>Cjv!#m;(k>)=m z)mMb`&J`$^mSFRS0*G?Ry11Lpcb9=nv&LvAe2NHTmop|#iah$lk2P|+b-k7#^TPZ_rl(4ZA0CoZC8pz)z*8s+7poX|0Li63xw zGXCXmcH3^S;11d(8 z+tFs;7E6~m&*bwCh2fFx$XSo<^Amvh$R=%p99;=}aSo;|rWa7nsUuYHO;2opIyp*H z*buc09~2gQ*fd;RBjD3RI{59O3hk>87WTRV9_CQIQkSQ4?#NSUt=?14lD*loRm`e( zvkbleIF+S0eaw;Yrw;l>2ujzC)N(E2pNrj0Nx|(~41<4<^5DP7f5?+IXtto4X<#fK zTw9c*Hv`D3GH*bMgu0!@Waz!;Zv0h{Y*1^FUly|3ffjl~-yeNuS3}efVCrBxONBe; zjtwfejHtHD3z@o~Jm;k0jUK2UdadOCe(-3vX9Puh;)2@iYwbZVQY&|+x`cSKI$q=# zooyIm@6t))o}VnxQ?$+2EXr#*dzvpsHwQie4xx@yWyVu(e`YiZ+qzn+`F*d1llfg`T>$*wS4=N>>`EI4Wu1%Sy4!cZ}7s9TMT*GmP_n z5Mi|BvDc0rbnIArc>#@o*;}_lcWM`(*ZWL8z13_s*_fPsp=JW@;9JJ^c!o zV(%p?E!E=n20q(JQ8I;hl5pW33jBUc5UTst!T4wzAL?c-d2EI zt6w0J`b=*X$C`I(U~);kL@a7~uxuy&v&~ePkz-?X?dvzfV>Y_+nYS3EYEtBtztAZA zNr6o)RwZP8YN`d2y4sY_q2~|o)ZlFi>4kzk--es&XqzIt<2Uf)wN)yDlVFUI^UkX@ z1&x!aVk0JG?k1mO>&zc8_&4VdOKWU*{r7hFu9Rz_y{L9u@?29#{Bb$4?;?`?XR5va zSoh-1leIUJm1P{4@{Alb@8(|_BSAr7f2VKa#UnUeUE;(RJ7A~!9u|`a_`f+bw$uUy z&Z82i{J-aYDJczb5H1IHuk*oi7bY2*bovJZ|Bgw{LWjwEip~Y(ANTezp_x!~wk+3Q zMHeDaq01I8F<%T@>jlABOLmyLy~*Fp zyj=3~F=I{n3oarf{%M?6KWk6aCb`I*A58e~>Dc}qJ#y_W@L$#z1x4he`^}S0DlhLhmZ#g%2^!u|PvUV{%(636Jq;eQ!k40c~`N%ky zjk}LSc(KEl@o-(W!_Ezd-l&7Vg}IWru*gAIngPhGOiL4DCU;dg07B~mh)3rj?No8o z6nWHw0<#7HB$@P+!%r-myy-+RIIdErb|qaUjIUd(4LeV578R;m-)c^qdH;6Ys!?=9 zJMv{4FT~C(_u2mZu~5RrbGH{xEJn9E@`loMDw9A+x9m(FP5;eua}{sL+7;9S8F*)4 zWk@mMGU&t2N1gW%Re|`Mz#9HW(}??8c`Sl)$K6*(V9C@|2gkm*G`8{KM~k=F?91<;g@l^=!b!{7+#a>XuMoJWom7TSW+D69F@>L z$YM@>=EqCjJlx?K#fvuW)Gpy*5Fw)$VR2Mm1Zc4hI|y=5kt;ioU+1{>oD*g`rRf$l zqW2Th2j2$o9~mc>n=w1Ww#jZuV)NAUY?{e5S)qSvan%zy5~Elrl?@1i4(e($-=b80 z%8+WIn496x71Gz@N=>B4k2+v^rBvmn6G1gJKcY?k@hl_}#5nA867gjzWQs6v#-*tQ z;Yz0}ScEMrDYJgUal< zj!vfX(at{z{SpV^H`~Li+TV+F?wE`Q0}_ zp<>+Y?JtQ!vWWQDI7B~B4T@wODV^KX9$_`+v^OiCAG@v}bVwV;m%rY(umR24 zqq`<>oC5C{^kP}YE(wiKo+9zfx=#Fo_iX7`@Q9iE=RWLCnkwvDSnd)MPieNJDT(7B zi0Tq54r?PdoGXJD*j#Iu^u*F+togGx5&x=*7ssGUm#8lou`vD?m!cI)V#;)062E*f zVr&1Hpso;-+$jG%!3#1r!C$GEahi6gcFdBGx^}R9tlxci;19IwC z5h>8<8qSMYC7PG}yUf04upPhS=A4P-U+qVr5(F%d%QwrLOCF`X~_z&W+e z9vrg@c-i0HH$pgQI%k_O(wIok>D<~dg-b|uhKml!XOo5uzwTg%5rX45hV(0iN=N#L z{w$7fH$2Wnxm4dNSO0*tm_2hY9btS0>diyA( zs5NQ{p`Guv1t+jKns`)%@HM7vtP;62k1Z%&1!R3~(s;-s=*(2^e)WQ$;s%uOx7ben z*lS(jL8KrW2$;eiJ3U2xOq}x@p??v$*53({?pY`)bH7%rnM_&>>eKXRJp~3)bL_%V z5Ldps=R9+rlBru~0IZ+~wrn;*yO6&}*G5x^*FdeuAs{}*I;`8<^Yw3ztG}71h;vR) zv{R_n;i&T>KUr&wn;W8=kj(T;E}c*@7M+NfHpPy+*ihU~Qrh!+syr8yliSzv+8_7xx#C#>Ur+%%E5hkJ7qsMoj0h^FCZ- zvmiEme`j=gyS+sPzdz`0<4#LG2njD&oczQ230Du-o&Xsvgt2lkdoKp@3~P)L?6`*9<$>#>j{vAwoZ6dsX1{2N18Z zU-T_XA&cJs6aDgG;$VAd>e)wejOIU{VE|Dq8Z2lz_~?0%oOC6nss_Se>wrcY)Ody7 zzGV`FbvGqdRg88KC#c2w9-gSqA0Tb~iE2Vy8J_pFtBkEJmAxam(Zp z_19(O2L%m#t4RjR{*Syju-*Qkn&66w*|Vb*ews;pPBZ%!^W>c+RUf-Pwqt12crwx& z!XTu;gpK_i^ls9reCb^xC>mXqI%PrhXvNTvO3ixBo;c>OZ5SKBw$ba~(u8X-n9~%>f zPl=ZnYi4oooMi5*=5?Yc6DNdhi4(Yu|FgB_$f7=B3M#V&*y z{@~=UF^Q|Ftr5F8DaC!4^D_(uHC1yrpYvSyTnd;5cbG`RU42J;HzU~m^#j2|k*eo5 
zTAc72@sgHNt=X9utgm4{^MrqR@*fduX{eHbyGs#%gSw5-lG!BMN1C`L?oy^dX8NaI zvm%lsspIud69U-XZl$yl!@WAYb1I}~Z1Ctgn31wl2gK}1Y*Cn+hj(--9n?$lqZuVO zHbx@xD61-@hth`&K`qYX8K!$s!Zaw!Iyep-ZLV19AEq^vL@TEktucy(LY@&LwH))=Pm%C-R+)XM{Cl?*B^g4>h4tR*vO5Wktk(I zGSR3%J0=37^2*=owuF8#u*ha&O}{MnvZVhe*?VXti8bV#C@(9&>v|{pAc5waA8^*k zqp{d3x4s%(+PE=o{$iw$-85I@Jg1vT4Qz#Dcd3i8?&mPqUX({pF_LVueG)a_=a zmBOeBk+KZG&j+qmM3VN1@?FW@hJKQAk~&~GDJ8KQE5RNMmaQa50ZEbUP19Vz9`+{j>GFw9ee_ymP=p} z*`bKGj(HhyB0f)kmB8Fg9D^9so1ZXsRUUdp2f3&&*!OH|%=*Z1hLVc0qa0?m-1k)}Rx)aej?DwMRoB{Vaz6P6{&^>rjg6?r#cjxCfcDo($@HfXKVRQPv z)*}g%$!*KmiuNwvMjgQUjj1);y6kXhy^(%a>PP3;eZx+q=|Y1)n$OqgPL5>X(DRjH zY64I({`oOq9(8x83U^en8Lv$diy!1aWGQLKkD@x~`aP2qB&-%YLgrQ%G4L#R#pJ)$ zU^$sgkpjlwk~g%yI(&f}#G)!@=lxtP@3bXT!@W&Hqi$?knC1T4&i3DKS!8~|er{h_DNfi1s^520AjD@F z(Y#)KO7>4sJGROy*#}eYtnUf13>8AYM8w$#(Q`z3;tl63oh`{_Iv+?RUW+9)5%m&b zi0p_y(C;f~8~RjuNy75IOOxr8Mu>xZT3*|~@$=IkdJ|OP_sub^t67~UY8gB(!E(<# z-9~}@3Ez=TQ%Q)ocWEgL&&2fgEgdX5HI7cicv773Y!F??sqJ z62H4r;Ia2JSaplQTM`}$VSf~wPJ0(v$<&`ob@%YWI`^$kQ zpj={YF(z>7;vT$xLgjI=*}uX&U7HrNViEIu>aKL5byT%x?dh(eNu&v zC62OFWOD~MS1OtdDCEz!e?w|n{;pLf4wF4Qd9r1=p_xgrEvT`8IX`m^|Bq+#~r~H zloLt4d#5<=8m2Kowl<+}?xadr3Q}$J7#r$sSDa1H;QBci`jHnId*%Mj*jh^xfGL%w z1`Aa0^`;XagrTYvmy1L%7H}>W8|=m}us0=RciP2s8`K+_E$^NHJz%gDLi{+|ty*Vg z1k3|W$V~?pK@}-?%JN3PuFteioh*0PGu^1$L0OO-2z6D8(tWS^;r^9RMUkB;sL`~v3IdLwQV3ZNhaW0h4c$CmvXfAh-@(y zFeRl!56FTlSi%mxUCQ;`*98}qK7eKC)Fc5dw_7cFv+oh9S-#(_3}wx!IwT{N2|C?& z!^TvTk;U}dB((f&SlqenveIWSi1cT`r6zmjLTR%(N;|(J(jROlSJ_MBFs*ecoZsw! z4n32_yT)qu4(ak=zO}r7c)qKSuK&;IF%pgllxRzjVExrfxIiBJnbMgok*0*S*$8PikD&-FI`Ltt0JuVSg2rQB5#m0Sw%b~L_+BnVMg zN)Q!aIP5Q4fug1|aVd`B>wbE;wi@DxvM<;3$kq^cs23#Ziqnm)bEUTQz$)&%Ss&0w zvCmbAWK#})i-xLXzSI-79p!8!4hFusgoa8mh-6m!j2N3Yx&5MLqYumT+6qL1KC@_a z+Z~VLy^Km?S)gXpdlu>RKC9#CJbumNYzw^mIgjURI>l6Pv&Ofm6>djijmA7<@ltFEmksbfpyP;oR{Nf$CPEUtP03oA#`%lW01$ziy-! zJjr+zwHk!Tv@8hAc28W2bRTIbBVZ&LAFk`E{eEVTOEdk#3J`bJe${dongCs*1rb9% zj#uR?s}s#4`W;!cg!csE?>kB<+Fx8m6YkH(8-0%6DQ5lqaEu~38l|9=%_)Diis?~c z@KU>Snj_6@!Z=!J;Ka!6Iocr46r5OZ{1K9{CLg)D(#~@c9eyj*J+D~(F3P9qi>E&R zvvmGiHE5L{h7{$2F?SrXmr3+ydZ!OIFWBJ`iQ7ybTEAU^H`|4grB${TNO zx`vwIu4Op!M!l01=&;vDnz2l(TmelCHas1do*sMDp*yE? z;5n(FwO<(b*8j$$Gb~XU-hWZzj>0KRWoqZ-(V2JM#5z)nJ>gG0pZ;)Z(`dhuVWeWu z3Da!<>5ZMwpbfT;O>YYM{RF65@~eGO74JXn%=&1wuFImDSEkCWeXy${=DCri%8m^D z!nVK5|E00LwcX^pzIwF3IPRnK*9q&FyxY*{3G5$a5GWfVPYn*Gb?sz?jH*X2r6_Xi zzDrD%HyvC!LXG+AcowxQTpp!I^j4~_jzRYN(~=^YteUI<{@oS=t~$Y2WM3=vO@8x* zPoG=gS5vg4U8VwW4Oapqxck)7`#H)Z`8kvzIxK+8W9`54%Z9HmpOPwM&=p@8dmUf$ z+Xw;zf2Q_5PJ<}XON#at>oYBvP2O2o%?7V700zbkYXXeZ@lC2R#yhBX6767OSwjW? 
zIQ1KPia?Yj&Sxqv%@@gU0q4<*Q{b}T1<&hpyC-GA^wtn0P$wBJo|nG@*r2?W}pd zlKJxXz+bqz7=8qBJoVtH?X?XL(%6UXb*GX-9;^BHm=~8ug}UdnQD5tdqVXa$XX~By z-Yd|3eAltkSjhS%GeQ{Jcq@7ezUrFsQFdPk;#nQNYL84B;$CSc$4-zq1)~(DqqKRO1d=c8oprkj4 z0?+nBE^-WUec875eXwuw-5OHJzhHOWck{EG*$pz0Wwv{0{o2=J`aS+tGqXYBQcNA= ztuWR?5wNY+5?LZul}t4RAG|D%lM-SJ@D3})S}Bz~!NvXfvNn`?Bg5SDN?dVr%jXck zvQeBnn5uv~uR-nV;~Y(0z4$G4L;PzeqZhqbz7|)^@6^ne1a^V%mzTU}y=8G|V)f`R z+c!CVl-?bwb4tQfXXz$c8y5arj%(Z5U9ly9d>`mesdl0mn$%&ig$UYuG_VV;DF5*W~6oDq{~2qU6m^TK&W2>d$~~mPS@;Ac9mUQ zomB|S2l_O?2Y~tBkgAXw+zR~0JueV3LKeHX8_3~Q4Ukal*bVFB-}NSRl;4hEQaBwh zCb(*qw>6QctiS4aaz=v0P5~v^-L@CPV-QzERvCkjOBev(P!H6`AOIswWtV3usFA^x zGXMO&>opR6)9U76eomNeW@*yZR9nd{oxbP(*=uDXS7#AEqJV-oh*>DVa!hvqTK?y~jpY}{KL9|- sny<_U2SA-JAc8N6C%5<}c^si@sudb6|2?x-cK0|ePnGWv)6@L_09N{&cmMzZ literal 0 HcmV?d00001 diff --git a/tests/photos/faces/David_Beckham_0002.jpg b/tests/photos/faces/David_Beckham_0002.jpg new file mode 100644 index 0000000000000000000000000000000000000000..48e8a80dec3c7a96dc99a44766dd4b056fe05f64 GIT binary patch literal 11721 zcmbWdWmFtb^!GVfaA$%;f(8bHy9EdyLeL4G1b26Wga82scL^l8%@EvO1Hp!2a2qUW z7~ENY|7XwcIs0m#=XO>1i>~fdpVL*hzV}u?%s#9F$kdh9lmQqR007400eDydC<3rB zG5;%%hW%J@@NjUjv2h4+ai8E35fBj(5)cv+lTeTmlaP}T5|TY7Bd4SS0)a%N)HF}2 zXeg+FRR6sM1MBe_Y#e+X9DFKbLSm}_ZF}efkmCWa0oPaK3Sw_#vD_VEcW9zFr#V}p7!044?&7A7{wjTAp8prv z|A&kGkqZ+W8w(rvKQ0VRpT~+tj*atN;0cAiHtt6^N*2LTJSv66pVi&?tU@|Rz)$Xz z1W(z7*Vxhjq5Tip|39!V|3_s157__9H4h-d!g&06SmXdX!0mz9stLOVFGNn;y6 zhn8?UxJPjin82$p_bKp1~E%>n6+IfId_knrZb+M-6K_O+unT_-dk=mcY1O zz2Vp`G-v95^0b6^(!j)H)P^>nr*}U&0>Edq+s1oz{jX2RI-m*4gUe(3%VVWpC-j7_ znGvM>29b3bT%*KLJL>igii(ur%k>X1sOM-_&YwG#>t1jn@wH!q9*E@CE%M>$+HJKT zUYNnX7&+-`c+@EY$%s#FVm*0Jssag1x+^ZzQ1lL&sG+(yxLAuM0s4^$5l!b~kb|^h z$$&dtYbVct&3P+YlsIUn*XKfK&cayvruWv?61)0q@WtR?Jp~l z9PJ>(X`X$rxlqK169n-WmL|@eQ<1UV>-xs1!@wvjJ=WNr5G@oJcTN^@MJ^zXl5IdN z{R%*F9iclbNg8w3YF zIMl4l?FfGSo@m8JwiR4&Js&86Sk{@GS5@}M<_X4FuE?Zy&%rjRNz_b2svbM+#N*yR z5!yK@b(m?$l4;>$p`W9mWxIazY$iESRdlcPw^d;mP964U!x^I>slUA{m6jBfop_;V ziimNVUaTI^8}sI^LO!+{r)cRU*&s=M59*;mcQtS|w%FsLR!!3`yV#DPSAxdN8P+;} zfIHhJcUhFYmlpvb31~D4Kg`^65ol!4e_^9DE8cHs)Xxz41s9$=WIv#suVK<`_QXD0 z71dJa^i1)@LcDUX>0qYbY21>%EPhLdexoAS{+e_+eN=YcrXTQ5qnW7k|Zw~;dsB<0tYkf`zoy~S)HqYxcTdX1~Z3tTR2^q$KUy*Kg zC>BSO{_zC&EzEbTop9}<*RQ_$d759C@*uqf^qKj+@Q*==nh>Aj z7>G-+<(K>HY2LRES-P>3Cj}L%X7^WUU_+4J+dOzvJ9v=VOr*YfO87EctA3&O0DnN0 zMn+~Y1^tyemj(Sa4tW^ZoNRA#`F6LtWMMa9*E{QNjE#?H(6?q>F#>7uk;l1JFhJ-0=WyWQdVJ*d zKqrj6!py{6h4P?*TJUK%E@bm1<`2qbW50ak@0M5(>RZIEkj)0HR$`H za|7#D`-`h~Zgr53pK?JS4?pcywR*1?3q<+VUk2Usp_os6BN|ONx`{s4-!~CLmSWnvJd4wxk@q-mCL0F}uao_}0-wfWP!QNp&e|7leIy#$n7Xc}b~? 
zsDF`oTkj+Nn(WZ_{!4pfS0}!UaK#y7qty{3nXuF(59)6k*$ZV}TMl&;zM*|7V&Vb9 z)Rjy1HlwZmrK-56`Mp=lVjT!g%q6knhdDjpWzh=>~tXNH2PoIQX!{WTcuZ?X4C8&8T-9NRYFF*k)DX$GtwGJC0(E8c$mmbx@u<-XKJo|CnDUQv(Okp0H&il5^ za>QF$Q*&9RQ)_!nQWCN>Mm`ZgJqCacL*Y>Mo2z3;NCPCo=r#~g-S0=d?l%*G4}Oz6Xg1eyu+ zib3PIDyyw`1Q>2EuD_Y}j#svrQ*Hmgwwa+8bE&u#R4ukt+us=e_Vxk5Z_~MH!V9ag zEB$lSGXk7~NQz@I$NGwe-*Y?UGFy@_(LztEk-B-#XGoL1^TrwEMbRG$tSxo2byt}P z5UC~Vkh8iB&3ACbSi7Ji_~VLAc?>kf(YhH%q^iZz5!*Q>`~aZ3baUZ>^#D!kbeS{m zz1HqI;cF3FD&!f3V+2_{#Z}FTe;{2x)nfrV@R1WDF?lsp%@_MvrasTFtqb978ct?Q zv)gxBamrlu+IF`R)C&p01{`K*98S}T=<$}e@@-ovw8cuj& z@R`1iv{GzO>7rZKuXhJS5~VE1J;dv>sWNSH4k^kovfMtTyO=Q%7)QF~>P^y1<|f9y z30Vr?kGCq&<#{e%U~`<7@cFyBk5>fHwN5}U>%dI<3p)8!1;26oGbffFQp~^3X_pbl z8>@$F(RugPFgvQx>_@fMu&l3X@eQML9?mMS-#|XKbC6OP&)Rj)(K%``)_oXcRT<2F zzCn4FbwrAB8F^1GN7(Ph5XvI(t?%_}W4ryO@jz|S;d3U(543oD$$OVz>_YnhXi-sK z*y&X1uKN|W)}O%D93SKU*9;j$zFqZF5?uMv0RyPI0q|F_tHQhy-gUV>I z2UGT7v*vVxZy;acP6Z@F?3+ej zA6+*(vd0f)m3S7}raLW&89&@g8XjC*5-wtxFS%SIK^(T(6FqX7GX+WB<)mj8yNuC# z0DO%QYiX~w@ZNq=%{LNv33F%0-Fq|4(IK_SGARWn_bcmRUzVTBKI>GUCW}baB1NZzkN?tk;+# zL7{pBtc3!#T9k>$Yz*PDc2XTULE`K`p~ogxhcIpi5|X5*E6Vgq$ecZv%f-~)d@w~F z%YLr|9Ta0*M;op-a|Scv|K3Y!z$Nv#&>aJ8mD@YpAC@BoHzB0mN)H zkIvcJ)&uWK{}Os%Dn4#MnbgRr0Z)qF3drs=9Yn?sB9ou#xR`6pE9SY9sO=(P4}caR z)&4FcgVVDqpTl^}%1sW6sx*v$6W0dC35_1&Uyz7l<9nm^B1_{otA?SX6t8~IEEkC7 zWd8B~I$rw)QC1Xvuf?Ra+#33)tyVy}Ntg$c-?->)OW*yoB!tBB-(K9itm7r!ez5;; zMqz5IJPj@Ert3FX7Eu$;+xOR>nk{Yp-?mmY$H{PDoecl{h}9^v^d@-i>4c-X_X1S? zweRPAX3%~8dYVy0ppJ7U?RukJ>$B7C2SAnSV0@qobPdY>H?m)3;;|RtT3)+M)PU@jDSl_`!d@{qtm*9rD6?ODRaJN-ox8 zCWu^ZI+d-@kzfC6R0YZV6LHkSj5m8L*@CbUfF?hC0C@kHxu@8rv;W60@pkq}$9hc9 zQ*W*CIGOt&dUVC1c5cpp(aMWHK?uoe(oO9|h{x_Q?v!Vu3kAtV6Rc1ie zwF_!HTzJHG3uNE%af~0-_2`)C86~Wqak&S*UC%n((aPdXHDIXxJ;kepsA6^fsc9oM zy(5CtSbLnW!|I80Sbo`h4*#s#QtWiJ}hClxIo6IF;cx=9*pUnS_w_ZbC`? z7?~kFu_;HaUIHKky5MqH*r4Da_ITzKJThxsMl^oDtp}?O0kTrt6DA z@A$=G>ha0$2*b$*6W%t;AKR`sE3wM+2+CbCYL{mgTv!t4{f6~NXJ^wnD>ku(5E$^w zR+AJ|)(ObbkgH71mh>a;OY>DqgNU2k;=Zn4$8=TFwDp{2A%%RE#sr^5kCJoO>o7an z;x>*EgQqb>^l|Su;~a|W(O>3A8KD{0?z7_CS#PuJe}BE(WBnV8kGgrqenEF?N@ru$ z10gtJj+CWBxUfW=TYaPisEQo;TW2y5{M2OtXX?+*UX*ef?Ke9(WN0LxfVI}@T?FKJ z-cKm7utMV1cLU`dX{R%L_=>DF?Y;EMisImStI_>Mge^)tCeGDm>8Y6r^nB~<<4GXz zoF#VP;&#?{pi0RAhI?3WHEgGqHJYMTJf`e+#%4g!Bf6sVS#-nsKdB+i5cxelZ#AlH z<0s922HORrp6@bgi$yXkKg};K#gT}^zIORgE;mbIkT?f`{qY>-9y@5x`m!qmE4Ofe}r!hVm;~5ag=&`Nb>-YJpToqy}H!_gQR&3 z!;;KPaTH3q?G;j zOFZxyxBHyTi1lWF(}fc{m%8nl*e93@fTdMhu!)1!Mog^MNq5dDp>P~!*1mr#KLx5eLv-HDoWR6 zo>^;0)PeDf@xlN(&<$Rd(MPzz^1Wn1)v23!zH;>AdEpgVj#m{^&}qB@)BYQ8#2g2P zpNJdmU*PxMP4Ilh2$6xyO&><&MGIH}CC4~eT)?WqU@NuNV%Mw7B~@Up_3h-+-?R$! 
zu~|YkHTMC)T*=u+ZlbPLC9>(WLdN5DAnyn*kStojeC_5Dx?JNVtVwT(*oamwnyS)G z3keZ2M#M!3U&toHZ;gu`)LQ1fMj)$V!by0@b+?4%98_CxcL#N(O1q)cyF>ebFay+- zXYv5x<6!1RFx^I63!{5#VmTZmdBT5&7iC-@iqA$g!-{$UK)i#Pqg6L7Ja_`}pyOC4 zRTOBs{&uat=o%Fd_LMh$cp0a#RQ11`Ue|Ohv^H@kDot02941u98>{&-jPXCg-~F{> z&*)DZtg}1>n+_dDf!-Sy3|sB$WSddzGkHQZOSaA_C-D4=fQ&D&gV)smlY zrg`sF(ZOL5Z_pa|_U8kj$fg_K!rbFgCj8!jR(REii7W9XYMRIH1T1WtIY}!T>q7E8 zUH6N?mxbEsvj+g4d4}}o5|!tD9~0lo)(`k<^rjsdF4z~UXZPRrsk-+Tfw;U)pF!nDQ{tcI%Pi*W z5K&+|iz~2p{(S5#K_Roz+P+ITVgRkGZ5Ajza6rR3UD z($h(@f{%T^*?q=&>wti<8S)OD!4!3tlSC!KCV8NVP0f`Q^z@}^u`i#|J$y3%J$pB< zkHqbcXC7h91AwgVLOb?^$x4Y9_?KEh(Ov!bMQy()ooK8xGS^W)u)lCug8oJX==rfR z0_yo1vS4iv`ZC6^X^swUd<@0e5*I3KRA@Owr4$1!^)3uETEv$Bz4HqS%JXo!?*>L2 zOp{Foys*e2HxN}hYIm%jyW)U{xg~XnK+y#E`nB3~V@DoZ*Tm*ZplM*iO z$dXOHoL5TV$E6*&Ut8A}js>AZCk*cJjg#@gj61JGa&$!r+>2=*b%AgEe?xr+eO;Lg z>J-RZdUB+rVkjZN7z=PeEgP zJm+V;ya`IGmS(ELE$g2rh{iL+X$~&{^%>NizAvd~J-EsGtCLZbxdrubgR9O>2FGEhF$9 z!6_f5eY%g2GFKrb_M?{gX~QN)g3)NY4hhw!H527bn`)l3n?YHDU%ADdE&iQ(LR?8| zwB{*c@J8tu7pxerL&=od&E-CHXpX;2N7q{xuKbDNw7YOdTHySJ5jcVBe9y!nwPMtI zZSosS;dG#5F^5J)d|pkmsX)45GX%I{VW`6>lf63;3*;en2&@m)%*?=(W&p-zexdjn?dH*N8#=gspIvl-TxNg9 z$qn;Al>ehb3w@4erThIt=wzk87CIhPPtBRfk5nYh7P0B0sJ#Au`+HeYKa`E=8r-P0 zCTkBvg+;pouBW(KchgV-ge)(*S;a*Zyak`W=;QwwKL*Rxv+#K{O_;n{(DDHA4VEm( zRq;bTML6FAOYk{(Y1XlCz(67;PA}_mMo`K0nRj~kIr;XZGFIPAdsbU3bPm0yXnctY z_|bs#n&JG1fJoTlGV4X2y;z>T6BU(bFqtDeyiMW!_?I=oTlrCsA@_ENfI+mW5(*z@ zN@$Tv0=M2{f`P>0=Pjq;Wz18FI5dO1-UilkdwRF?b?fVsfxOlMfOgjLcLF$lyE=Pn z&rAQTixq4;BiXMj_Du*25ajE2)=7E)u&C6O8X>B%V@BcUHxUS)GQ2HoVyVyuG*g(WyK+BqU~$7I0J@7H1VXcj+aKkaUSSRpmv+VzawnQtH6sY zoy)l!k)37H$?fUWJ^e6!+Q1*(A}BM)Dd$sA#_8VBu$l1}AO2I>`YnqWYVD+8TvK*y zbL86btzzlxCkFDVa^Ef9k4>nTs(`vhW_()~_OAo|qlw2M62R`+-CxUZ)PM236);IO zsji+98wGv8j~EpW2_1)e&-=6HhdnxZUu&Balu_L}=@4zY7>E2 zEx1fETmbrh8E#jehgR-!LEZRwCKBOIp{Q7O_^P5$k*cY$;D^BbGpGA!JI}w0y~|)f zwfZk5?5LSPCtHNlAZ8Ct8f_yYq8xQ=WTqSoW$%M=9QPzih>(6OnmUuwx1qL8j2I)e z??ana{b-J2Z;nk3Iu5-kx5r*eEXwzVnN#dq=(MDh1!D?qwUwNwU&eBz7sm**(axn~ z!30dlLRTFI;hZz|A-<|TJF+!_^lQ` zQHtfd&2;vG0z%rs4Mjk%qUGyqiGn;QIkn1+xG$CAN)ob(K5Jc4-#cE6%pVyEb2>9vy zq^%W!WDv|y1>Q;B!EpO-MwY=LVb|#M$k*7=fX}mF+N$Z!xtQ~BlR=`s_Bf4A(JSUr zuacHBJQ@Au6ipLLoFMF=+;xW%{Cojni;t@iFgxmB2pu@aDet#ZV;iootAzN0Su6o9 zzXk3hx$(OmY^fB=Pz2-y68Mvrg-iXNf8Nu*T%6_!vu$@(*Wd~tdcSdOHu>D)BqTj>_hwaeg-*HNTk?8f+aO8KasOLfoPYhQ1SQmgN zXSSL$F!~iPxHhf@*}Z_To`6_h}W; zo~u1K)60@rXusb{37hwT>h7(8CsufR{)`jAFn{RX-i`^8;(j`wD_$~;RDHiLD(k&M z**w?Ol%iIQo&mz5)({w5=ZlpSy(VqEwnVA_L>{xUb3gi2M&pegRq_ei%mZa?w{K+~ zlV@Ri6ca$NROshjyV`2UmcU*neKu%x5=?Pvy`q_+?xbcZK?~f=irt@Dtl>YNg>2BB zjMK2r8*BKv4i3jB=c$jNQo{{ly7cSd0oyj_RXpKVA3~Ww#;zra333A+FDNp%k__oR z$NIBcNTY(d()*Si9aXds{F%EV@13b%`dbUU3f>UFnbV=&^wEQzIhQ{o_ zO8Dz?zvAbT?)1k?=n_;iXzDRmHBTSvu86#_@G<)7DMZe;?RHzrw$FjMeYK4J1(Wu4 zf7|DSn@M{|Do=UsfDgvBLvc)>?gOBQiiaTY1?36Oxc#P+v(=ZSBu?pT=>D)8H(@LhTkxw!yj%Bt#vW0w5vUwgQ4vFoPY%L5ndnO3trWtKCr)d)! 
zcuUF?@Y*3K59*QjbX@oc_ohQW|CWGo-GZE~$+?+>v)*ApKl zIoLx=v#=*Af=D|cjQGAL{NTmE ztr+8^=+us$$mo-mzNe%vK`}h9E!YeaUmv6K`F6LVBH8Ya1Jq4tFdys-y6lM3%ZqjM z{R_9k%9i3x=6R_zA_Nj2KKx#>&)AL4jMYo~#Ysl)oNdIMv zIHku^;Cc(`fVfk^h)IO30u{%({-9Y?~HX1Y<<%itJ}!OY?DaB=JPMBhvu^4n=w3WO`WO;T^w^SSX zXJYe@e8}JB)Cd6zrCfJEVB;KXnU!R8roUdGH%$qb1TbcCS&H;gmD9rQ)5cL6vsuzA z6!96;BgS}j{aqOGr4=E(E&EYHW?L11Zowrq$2mUa#nL&e{`!QGhGPO@dv9~!d`eoD z6FxB#b0g;=CV*_j8>Nti@peJ68&1f6{sqa}#%H{X!?9d*)!mg#j$uI*=F7qV7 zt7>||t{@kV8{YRTg~=q^oiTs`DHz)~-S2AueWA@DoXpxV1WBR_gZ5|Vh zCH(K{WfHMBKT8D1;*salC798k+r4>7x%R8$B-*jFW-zu&n%)luW%0TZEK7B=43zC6 zQKi}G4=gIbeEbn2?i>Z*;J7sT*%w(VE^-s_a|c;t%Q7rXzR;#TTay2)yJ~=M-lFRP za8%VHv^TE?DYO$>e5scIBezbw@*u1BoxnJ5q13aMQ#QE^ix@F zw<<{6bqzQc`OU$v)6cq<4JY>vuevffrccV|#KXuJu!07qnd6xzZa14RfBc z>ACSQBn1VlC-5$qXj+9~M~IzZXWu{=(B?4aqsJY#c>&oMz65;4_KmnVHgC*dva(wK zrs;Yob3L;mdwp6J!b}ajC%ILaI%~Bv)wk47l<#6KIV($GZY$TY;C>}@cgHm#yVX1% z8N^?M9uKQqvLE)!1A3$KwMJg#q*b-dBhn_dAg4s)bjshf68V|Rvl zAxO~97dy7t1*;Chu>ie#x3c#qrn&}=M>U#0Ws8FrpJ`RBdSjSdfsG5 zrp=Eob$R5qKLB=%hGjh%&3(ts{iA}6GiHx5%7Ce3n>4x;x_Zvdkbvkdlb=CY3(C*KOY?V0aiA@&BbmgNc!j1r9bA!1rj2}m*GKbEg{4Uu|Xn-m!xQ??JK$vDgEB7kg(7JH*q6>Lu?83Y{?Puz;{L;u^fA{#DJ}ahgeRI+=m9_pAQ9g& zaJ2NB2^iU`z86}qTzq9H8C~&AyduTcinT<9qWb2|d-1DCk)q1(>Zn1HbUIq=SbqvP z6DTO%+tD{pu+}(f9Y?OnqHlq=CkKN37vs6^#8@C9{bS<}=R+6SPtc5r;B4ukk&dQh zM#`-L$N4KsL_G}yA_XU5F6h7@`*1q*e=ZYGmD55$^E~wt7^HQKdkbPQ#Lzc!@^*I6 zc#RCMg&tCVaEz9lIVh#=hw@D|^} z$c)0NzFabC6PX4Cp&Ov>d0(V?LP+}}gZdvXRGR&(SOG>|i2%_Zfbd!x(6l`sV!B6hA zVPg7^3yDMOq4sQ`QcLtdt;sa;ZV+Yga}!Utgl5fx4^S7fYSDRXHMy4H4G=eJQ|DoH zl%t6cj|#l7bp0>4df#6H56|MM>>_|Xr~JFo3wt|Bf(Vz^S47p2-yl-^r~&wn!D;a2 rWpI&x_Py+*M|+&Iy$4UBH+m0%K5oLGX*o1O_n{MG_+A)phQ#yQ}*4|JLKu<2ry?RZ&F|fQAMDpgnDX#}$A)0R7ps z|CA@gcq*7Un3xzCn0VOOSUAt|oQD ze;MF^8rribAF;4;aPjb;8Z;9Fo}r8slR|sulvT*e{VOgWB^5OdEgL%rCl|M{h^UyjgrxjC1w|!g6;&NwJ$(a1 zBV%hDTRVFPM<)+YFK-`TKmV}sh{&jK(J?8h-_z1FGPAOaic7$yW#tu>4UJ9BEv;?s z9ew=+gG0k3zeZ>0<`))WOUv*L#OBua&hFm+0qXDB`NieGtLvNpaG?Rv{~PP+_}^gv z2N%f`*E0+ZbPVkOaG^c(dD_uQFfd;TV3EGj!nSlHdnp)-L;g0ou>L15i;(syg_ZjZ z9wn>r1{>->X#Yj_e+TyU|BLK@f&FhTIN&)t+S9{BCjrO-kR^Lu-?c2#8O3!pwKR$< z*Odve9HmEX$zwDIer#!ds9md2QhmKTSQNumCBWJdK_21mp|V|n+ZA-MiOm~p7M%jd zP0s#Asq<ew<&x6P;QC4(x8E76>EOh=w-Q0#e$2?iWv@PQH;N* zwX-Kc&`sm-eMK{OvD(YZxCJ(MG4w9Iw=pwwO(r?lBp+;2O@ir8BPJoQo`-YP09T}6 zoIXf7Y+_=aXaiwC*TMQKUGdiVjZvqq{gmzv8eO32Pu=vTatnCpc`SXX!6ss0age7=xa};AK<2ur5~I+jtJ12ML5ZqybHiPxYX_WZW?#}NB7E=5w=2I zD~Ph_AB)vXQKl+Ty(N7F^v{gt63w;~5nsu$DFTQ_>mU(j#?lpr(QlTnia-)PzpJ4# zf?ZQXe+UIxy)@oTJ%fhm%xuqR%NR! 
zjHT&8vdS+6Swm4vw7lchQB>uy&&6Ts$6OT9pJ}!%NUd=T^%k;UG-uR-l}haGGS-9c z%N#j=6$&3Srd#Z;`2uU$#2bP zF0E;V6ztTzrhx{n|7cTLFT>u)Og{o@yf!l6O~olpq~_PDsx504xY8<7A&YGXA9XJU z(kU3j3ys-6_q(D*eaW3!Q6n-qC#Hl|kR@OA!&xYdO3dT_)-RI9_|J4VvqSgG`vC>W zabqY`ZQBLK#{AC?C5%&z{8A)oHJZ*vO;mpiNMw|3;$y^eyIA8Z8s-a%o(<&S*9{43 ziZmt!8_QSKDV^U?3R`;B0?vK&uA-HJa!3MbP@1Fq1&(};@_zRDJ(EQOD_x$yCwtG^ zzEP5|F`}V+izAsBZu&SBBT9(!mechI5xF}yl><4+LrFd_<_HFd-!h;~3v$I(!(h*I z6i-0ZI44t+ntX}4h|-572$@h)st=cyvHj+p14_`LQQD4`&kAqiH*tE>(t^j8B?0@g}v3<~~`~Cu7;94PD4yOH^uR8(7IY zgrFlu8z6(#8e=e_=S=%1*-9}YJ%k!bzlGQ}UNZn;P_>|P2Pzz^`A>G4jMff%kASz$ z=DR;L{*WfyC%!jy$w+TgfX5#LHYsMN%aeX0$}h|$ShKZFiSmLJcoSnkSOEJ9t>ZPZ z7t7Vxm!LvI@-nl~Ud!+HF(a@$86OAA;|b`PJ$>aYPZ?#G%o;>wVk`2m-@~E$=&sw6GrnS>8|L9YRW!nnQA z8k7P--HcCS>*?V|e9eY^au51!AyU%A8#q|v+p2fvSIx4fXwzUYk|7l-NBb}Fxo^YN zP_{#RW0GI5Z93Fc4?kcyIi`5(gfXJzYRyb0iq)#aL6%3g_{1Q!k?Mmc_H6NQC4U4s zu|jP%dTYqEgK>&!2=_zKO*_?)f}Fq3H7pD|No4E_PlR}dI3T1l#)Al`hB#3JSycje zmhz!HXciKKQp6gY+Og@!yG+Ilb{8+J{}Ed6D?jiJH{Ltk9dHHnDfTlu+#z@0_`mM; zzT1yH-92gD)x5QGkVy2Om*rv^SW03VY6}7lJNw5`G@AQerhob}T;Q+ZUL1A}<9$Er zL1?XUrUfd_uG$sRH=^;UwGinUsb!no!FU#!oy8_z={Ua-zHfZ?1F9lMn<94YM4K#o zWIx%P)IQAM&>AB6;qbNAVzEmaVJYq0yY6s3kAY+5v>Ip8Td`J3_0e+H+nl%Cz zrRS<*>5~O(dme^j{&8{yP5Z(S=IV~s$r&Kmq!_(d*i11cUv*s%1=#<9mb} z=F`pQOeWwvTsjHmtGsXBG`)+v9rPy1A1T`| zwrZier{gfwS;b%O6Dmjk>jrkKiKo@NhRZct`NlMvA9X5Y}xfr6kQ!PK3^Ifawi`;^5Y zubzGe>^|=@twpeznl^`b-;df|(~Q_|5oCuONS*R{c60%2J`{VEF>Ut&cvTU!iM!KM z*P4&yz~6ob%5AtZ+GNlc@b)YMw`+>jE&Zc`6iXd{`r!b-dN>=%@F)e7UjNt{EvBm~ zofT1ZmNeYTlMbDxtX^Tlb?T-d$<8S`yZnu*S@m|K1qj(5~CGi zed1qW7)53M_hSF|{~Vb@CgSX~!{-KOOkbzGT`97~G+9i>0&P_)&ei`-jR04mzhE!^ z8)3Grb?qgcZZo)RZ-fkIfv3Y zgV*v3Tu2oiJM`j4{jne7(jFQD&Jbd4o{A00uiR7$-|ylJj4!sMwtpX{5t@w#aN0;Y zEwzkzEc*w{`pI!(CtU2dwRH+5+FMnBR^h^D;!#HAAb?yiW;0cl&H57-%06OH;Vf@4 zrcn)${#hDVftT$}_>1cnS0`%XVg_kqUNu8P`rd?CZkX_=i(e;k0Mak+s_+qZkE{=Ke6t6%G^CBAA86k4YdUZpN*S$jA< z8%@-@KEJGy>I)&Hs~Hg6?0Tj##TB$Ja15QrqdAf`6Q8Q=U2Mc z8YBJs&{!(xeQu-*-Xnm;ysL1p!#tYyQv@#4z^IT3kxVtDUGKGYs2+{`v3Fm1M{)0q z)7BW{x9s^QnW#qqFRqoGWHVn>fF+4e%D9C2!buK703f=#q%U)v;V8j6B!o1s_}!|y zlF!1_zuYV%QLH;AwkCcMgk<_~#a4yyJYBCXv5d5MyMKu?5*x(=nq@Di1{7rK?BH6i zPbGTc^OO4O3z4_XN)nAx5Jd#OxzDL>bNxLmK2R6}3G%3PG`8yku|a35PUt+=y5C=w zZP;DeZ+@v6fRFod0gZG`XWD=2C%&As-Xp@$q;q6`BbP(W#AQ7REQzPlKrD~!n>(Jy zT*@fUxy$NBdMAAClCnxrGS95dtcI{`x;b*~@i{J)&NUk5zR|FNaz)gu>;!1nQHsSV zy2XTz-^>|n`wjw`8K<(<`{6|#!gjID=!+>a-0gs&Y3Yv{z)Qae>*Y?}n~Q-<0`l~u z?^co?L!cqyJX?sDs0!-~4bfWzFOM3(#hIds9_l+F@?c}CB#+N}o_Ckm9^X2hM!4qZ zCWgnPc(oawff$KAUefVP8m2VI5^yA7hzcCI#$EuFcACGokq=@S^ul{yb9iQCLD5XZ zd}{UTlc>Zcg^ywKGGvRXPr>aEt!u#p3754d)anti93jb}PqU_cW}Z^+z(L?dPp`e z41bemaI9D@BqH;3gXe!CRre0YY>CyJ!Dy&qPhE`8d3Ojo_pLDpW_|N{4FK$P%7`+Y zhJHaqw5`58R6W2(2Yc(I#%DsAG>vDaSZ2!VkVtIYpVgtyk?dl`T~*$qS#+dd^=nM< z9a;M;Lsvxf+gHg%@g3o>|0H>fFA+Qo_`2s14kGUGFXlKHVeyUq=fWQ9<=Tgj3QJp$~>lbv@-=?^gsnu?!M z?{@h;0)9@Ac$&!uAcSyM=naYZfHey*6PHS%@X|Wa@uZm z%70)T%YNS&DT>LG0%$2v6o_N13aPGgOE2cG2d-=ed=sV{$f6ha*cwn07lCZte;jni zSMIj{MMrs;0Gtdo3goTf7w*g;RdH@n#%C1W+AZiSJQa&}mDZ{Cqs)QE=a47IxlIJe z%?1=0cuY%kq414nnzL%yUC-VAmU9zPoMZP-1QSsyp0SjF=n>|q2?7lk^n1fyqNE#@ zS<|qT>CX%;FdsMxfW!N)((k`2;`~~1@fF#&s+&n!+O1Mo6{|m=Sg5WxGL|0Bt&xl~ z$h*@u;^X}h#RLqj5m=FUNIQ-ts6J@u%TysJUp+Cm-!v1Hs@O52dL{&G_I1i=OiTDg z?yNn2t%Jx1)5EvHS%mL=@yV^F61zNUi6U~g=bI5chv`Bsul%I_y=z(xU`_E%#wK(! z1bkl7E&&yiM8RpYRmFShGruZ9@JdJ(vm@_9n2r4piv2lbmV*QBCYP9QQX{CB4;2ahWv5{LU#4qa=V~Y|EM|f$PiZ$b9Ku2lqi}vWP^d7E4P6kY3__Y z#6+q<)`XTVvga`gR6dg}s?g(2z9eVlY@Fqjt|+6Kf+(Qy2hxMNPd(E`Q|&f=_kx@M z5+`fTJXXHZrA$3 zePbRW^fUY6fAQr_VxQtNaH*pcYp*Aq?uZvRrzd`A(CHP6)BJl1xE3e=M677_q)^7? 
zmaiw=!NgqGbM|W4BRNYp8i)J_gO)F9cp2xcT%pD`F>AW-SU9rJOgilwuae75RwSF} z1uK%uN=Q3*|JZcVv*a0 z_@&v~R!OWXP%js__!=v8%sJDpAKk+dZGU3P9szcSn%w-OM{$e0QIMJv!+~MjhVqjs z)i6X&t_TsX0DfOW8`At;nU9CGP?x!9APi!nEYhjg#LU(UAApGXVUvcy|IQ}8tO?U7>OMQ`PgLm%GwCQn7PoXNNl;pUE zmec>h8MBNah{Pa7bWfeid^K?(#->L=%MfKzR+_YqK6REP9p9;^7?AUKGMwPztRy8RJ{|j7vSjLei$p#&?9N_)>+KnjX zusPwd37jau<6LCt@nU#``ZHFergJ4$_IG$HU;Y>WS~oNu{zr?cLy!4Zm+3TorH)h* zJ>M+>*pl5oap5TDJ(Yak6Sihr?3|_Nf<|%FgCotl#yGD>%R}UUesw1UA_T;+z4ysJMK1?e%v5vl9nYYr@0gKc`AS zzf=oQ5DJsfX_?tBag9z)k7y76E{%pxfEvkBwg$Kt<_Htc_g~b%d_X<@5i^9|4xd%* zq8F-xD}ooB$Gc|GE6N|19sz5vlNqgl0U7JyjLWZ6^r|wSo2RcrHYlT9KO2KQ`E$3* zA>|?%5xG%38j=fH%{T*?Kc(v7-7t@HTy3R%-;+z*S*X1SU-@FqzKi?I=$5b>eZu(D zN5K12l#Z=K`Z4q~G|arG>P6)V;z~FqmHbyrMW<3> z#3U74vH_$Yj%qT=Qy^o#HbTpxXhrGtuUUK@Cn+7q&09j6v4^?Z^N#B;&ay8ux8U>+ zOV2;ag}q%d0dA4+63h2z+wWuI2=;Wg43s7qX{Y&^RD?X5U+WC}@7H*$%Leg(!m0AC zI#XNgD8Sx#$|l%y`EgwM=^ioEHXETB3*rUh{$vMw-k`CyHlPrN#a{QElGOSU;Sz^y*!nC52Th!QsLm6VHU(4Ietf9IV||1{{i z1#TAahniU4;!>NJWN({mz7%0jkUtQWT`7MZRTcG)Rul3a164C>TV zld>vYT5kbCPcPzsPa-9>T1x|9$?`=#;c>>PT-nMGR#Gr7L@iwZ+V#DDI@w!n%ot*z zVdt^D!_Cgh;bmv|)wP`Q857eKVQOVRk02m5ZnIRP7QgXowbIG_y|(FC*Cg&wL`RU* ziq3q~ipl0@70|!7{5OP8WL5Lq%61P8^@Z@@ELvgjol3U-57Tg?HL60 zR!o{(dri@o=F`nzCiu~qRxs27Pude_ISMC1{=7$PT~@+ZVt{^;!;aR_Qj}&SFNBXj zB77$P=bIr-if#Wt1D!-s$9%P;6|NW7B0Fm@?kio37WBmn*(F|c59bqDY71oTPKMRe z6~l!85FG@Q%-fp$sKU8zBtlf(dPv-9bA#Ob?!3H;bCJG0d~&;I;x%?BZCY={F9 z9$%s}SGV@CM0l~=6^yJO>FKALAKp%)_GZSIfO|jqtZfPBB*u2_n-_%AVl|qPl{st$ zT?FQO`Ny0-~SX75EO;k@sAISR>r8{`!cEH0L#3E@0iX6U=d7= z3s3f5kfXg)Z)(QL%`ef~y`9F)=B|Y&HZwWN&ZAB3cfUy0;asznIj~0&rAg@KP>V}1 zdbby=ZG>R|UR@b7_R>u6cA0ISqIZI?R5X1z5zbsa%D$D+oB?&4C3@Z$n-5!@f@9#5 zHXY#vTVfi(pG{eOB3NI!T&ex+=@S`ps=n)&bF47|Is4o3l8P~YwUvP~HE$5{xLPlh&9KNj@`5q!^gA%8fk)&bX(v9gj{c|sUdp>p`Lg|Miial+k7#i{2at)f=3)52C zl+n@R{bp`Xje^A--8om47L-Y|Yg54RhTCeX9^iB^&CnJs_rD`jgbNdTaDDOh+@j@H z)Xjx(5TmtA*6l|LmTu4ng9i4rmr?1rq%U{BVQ?l2uUl8*t2Zwd31oi2<>0m7z3v70 zNj-xZkXa>@H*VjpJ;0|?(eIM`SzYK@Qq6Ke&-Nm9MvxYw`!|DA5w*~Ufj%PRb2H$O zn>kKQH|_zqggOgHkfwFReD@qx`oy4FoUuG3TZ{zx#pq7E8Kqv9^^5KcFK@U}+vqX! 
ztvW;UzZQwxy(U98M@Bf7&QgtlAuM|;qR{1&r5$6m$<0$Pax8X!y#;p@t1V#a>rHZI zYyN1-Kam;QhJ>O036{dSJJ;ftrK2Nl(MNzSsJ?!|vVHE)HiyhG#h9k#G}ipbl4e8Jmi5Elc5*nhT7PCE$mgXEB~8#Q(U}{l>1CVU#o!hRs0Wml$p& zOuiZEkF-Zo&olomqdvDb@~CnO-1bk-JG8p0xgF0eDpmCTFq@*uW)^QqcPtjAfsZl zY~MmwIn~%$dHAJ~5=YN$cLSExt%wGmcz;4?MBx4A1o4tnD+xaibpvW;whe{bte!@k(N6{M(5*3!;z)GOHCcEcWi=xF> zS*{$ebg7IHrZtILucwf1z=W=?C`1H}nfAJ1ARkxRgSUQZEwe_|WHsQOTxW6|s8?%A zul$cSwT5oqR?r6306qsi-znE)u`y77^HzNG+R&Z?WnO@yGmbySi0C{sDKpLQjAe= zQ}GM;E+E0fs3cNCI{W$;tSMJVR#49?u-ZfQR(UN%GJ=qLK7M-sjT#xeaB^?g=Tquu zaQR&^QoC+tt+8!Dgp4e|ncbb>T|Wmz@5}oF9YxIw@bWR&D~~vB2kaLTNX-cm-2}%4 z!0TO_autaKCp*)ftnGj~^!X#GeVN%SE=hZ8<|JU`Re_IdshQ)=clWOoLumWCu!w!* z)t$tm=CLgHq@bPeFd9T{Y<5DU&vT6xtG5C%w^u{j26D<(=0N#F&1Y97?~6SWCp*vF zT+93R4rpjTKLS**@q1;Z>~+Ut{;6XR?X^JL#@vj~azB4r>9jp<>Dn?a%PKQXZS+w< zwDL5$**$pGpF#el=BwUwGf|E?BbW&NVtOZtpJPw&lwSb{HkwY>=+*KvFRN{iC&2`S zPZ`Sv+P-%fjeVM9`ECDdJP>)IXZj~sY$Mc=0GgrWFc;q{bBPApg0L-z-ZRVVFXZax zIf`!o*h>QM191BFDF$fd^Fc1xmmOG#geMToDR0rETCok8E6sdti3&s3@PbeJnxQdra0+k+nr`fb*Rs4L?70~&EjHAI7)%j%{I%q20}p}T{`}i zmfp~(l;-a+#6zGZ-&?f*s2)LuN>rFJ_|s)Gt0(wXDUZSa*(c{qswRnGSx1t?A5MFV zHp8@~`c^`5D82J3INnKr$2gp^i)eX{L$oSM~tpL^4)0{hh5>E;IdoLU^0m`e(jFTbdb3Z&aS(&_geS^O7Z zAOY8;K(0~T7u8@S_gB7sZnmW=;D@Oy=#t$b%e|<4!_2O$YwMl83zb0;y+iT5ady?B zk2|tH3l?dD0qZcwp6>>`69fjojQ1C8D*i#XQ|u!`RB-Y>AT*(QY!ahLgGhuXB>RVu zOQFq{&SlH9sZ1`+UTg|;6>k1l+Lx{9>KsJFe=mzocYE{l};7)!?dH1C7HglrNH0h^?nsyR49wyp1n}ip}{c`A5>U}=r3n| zX#2GZ{si)W?{3CZ72l;oF*TaCLYvJ{6aA!XT)`aRI7sufjY!E{e!F8c@LP#J;4f)r zW|d-xP|G3BM}ToxNdM0@-^;+tt1UiB56#pZd71?^oxVWV!e2#5vGhN_>8ykoy=JNu zz4~-fkbsaQv-Oqzb55vc;ju}q!0ia^suyX&d*y+!%Rw}8v49~r_j{R}6oUptZ1t;A z1Etl=urWbZi4pNZt3TbVD@3vvg-ySjDt_Y}Na}l+xx9U~n~#7V=`<>pX2@3LP8JFs zzo=8Ht29ZA;sz<|0#f_Y-g72YzD62%QAX-CPRDAh&z%wiA>q)^H3GU#t1@_zz&IZW zFtqL^pP)aU{`VH42L3%59-Cjmw*_~PBMbh!k0`;bu)%++kRn|p~?;TI0K6q$D24a@< zxv?!Zn%-J@S$-+rE~h@s!2Sw4zAi~iOl+^RN8mQ?%lGYYVe6aY)+(MB3^UlPNwI}A z_hNHzvIfACOuL7qwHU^@QAsjYQ?=p!3$d5U?}=*eq)=7w7B;k`BiCH6r476s%Z>jG z?yM;DJ>@P+?Yb#iyHj|mx0Wwz-(C5ko@VoYs&AC?tQddeJS;deKUZ_h!>7H^(GQ^f zdJ@>JU~Crf$1Ce$fBWbnEG|lksWtf_bK zIdY0Yx8UGx|XxK z17~({rjvFLN>TmvX`EK*#4ZI*49)6oPB0MumN8H?t<~9MA~KZ$kESJ^f|i>;h~RT> zB{#NNXJ6l{wL93xk(zuy04>~+)MRj>*R#}dfCIB<;KS1#Px(7~t^Mp!1?&?y^NOMa zvZSiMgjb-kYO}#<`w#unWaiN}Ycy9_cw{ico=>}zEuzslYqc<};g7VIJovkXb{ zm0$L}=y6Dl^0B~^s1U z+_LZzOcEXeY2KE(tVhBgu$|Ic%0QiU%>*AfnF3bX?N!D$M>p`dZ~^(=18@$mCN)67 zyu)*5!DhOT`o&tnH^fl}7SibIK0Qa6aAjlbB#pY6zSvbjVu?=I zri!Fsj-aWM;^+wbptveE{jXU1rli1Z6k`uoJo*%Heb}rZv005D*HQ5IBtjr;`&c^F z;>8sb!Slf0>44zfve|yK4l~CmlAK(u7q8ZnmiC&D8a5YiM-kkmVDY3dCChL}Cfe{8 z5pmoMUB~Orc|K4gG&A{M2k2KfJafkq1Rm{a>1v&dR7%!tBaOn2(ZLGke8c+XM>Z>S ziC`lrqYcdgsO6tTcD*@5<`j4nQv>CsaQS_m9Z*8_M!l7wCuB?n(Wq6o%W9kgs&ZgQYsAyeI_7p&Q=-2YtB=FMe+&)?s&YT!{){EtwE0Tl*gPo8sakGxAV^(ILxSK6 zc5a>OG<0fUeQe75?ckM?g@26H za0d#qcSBdAlika4M90WANbkOQUiLf)r?%%2uwLx_v^Mm65%oX;|0KD6EhLaaoZ3Y$ z++fafccNI<2}*7@qi9GONG$rQDqFvz!hsAwR;@Hf&w129{LYDOUybAU9neoJJiol$ zAeXdJSo&$+upZpND)eC;R1^*SyT}0U9@0s`{@^%+w!_H%+d8IFw@_{4onD1x4;d4_ zps3Jo&+&Z1@{O*FwupnBnv1%*qr7}8LuGuIv7E;#gr%O8bfRz4T83KRq$1LhJoj#S z!aVFjwi5F1E>MJYNzJWF6jPGsAH8Sgd2-OE>F@EyA#nWUj>rC>{g}5^=SR7354!g} z4}UEn+U<{k#NrD`AqRt!wcg3Z_iyO7Ic>GYa&{*SGKh)di5GS_#?tv~X@3_`av+TW zgNop(r`M9<+w<4JDLLa)p_mU(QPD^=FHy5q|3I7~6a9ndJ#;oX$_a>y9(<$1A`otu Ovwb_aSN-~N`F{YRmY9G5 literal 0 HcmV?d00001 diff --git a/tests/test_classifier_faces.py b/tests/test_classifier_faces.py new file mode 100644 index 00000000..b53d626a --- /dev/null +++ b/tests/test_classifier_faces.py @@ -0,0 +1,51 @@ +from pathlib import Path + +from annoy import AnnoyIndex +import 
+from pathlib import Path
+
+from annoy import AnnoyIndex
+import numpy as np
+from PIL import Image
+
+from photonix.classifiers.face.deepface import DeepFace
+from photonix.classifiers.face.deepface.commons.distance import findEuclideanDistance
+
+
+TRAIN_FACES = [
+    'Boris_Becker_0003.jpg',
+    'Boris_Becker_0004.jpg',
+    'David_Beckham_0001.jpg',
+    'David_Beckham_0002.jpg',
+]
+TEST_FACES = [
+    # Test image, nearest match in TRAIN_FACES, distance (3DP)
+    ('Boris_Becker_0005.jpg', 0, '9.614'),
+    ('David_Beckham_0010.jpg', 2, '10.956'),
+    ('Barbara_Becker_0001.jpg', 2, '15.736'),
+]
+
+
+def test_annoy_index():
+    embedding_cache = []
+    embedding_size = 128  # FaceNet output size
+    t = AnnoyIndex(embedding_size, 'euclidean')
+
+    for i, fn in enumerate(TRAIN_FACES):
+        path = str(Path(__file__).parent / 'photos' / 'faces' / fn)
+        image_data = Image.open(path)
+        embedding = DeepFace.represent(np.asarray(image_data), model_name='Facenet')
+        embedding_cache.append(embedding)
+        t.add_item(i, embedding)
+
+    t.build(3)  # Number of random forest trees
+
+    for i, (fn, expected_nearest, expected_distance) in enumerate(TEST_FACES):
+        path = str(Path(__file__).parent / 'photos' / 'faces' / fn)
+        image_data = Image.open(path)
+        embedding = DeepFace.represent(np.asarray(image_data), model_name='Facenet')
+        nearest, distance = t.get_nns_by_vector(embedding, 1, include_distances=True)
+        nearest = nearest[0]
+        distance = distance[0]
+
+        assert nearest == expected_nearest
+        assert '{:.3f}'.format(distance) == expected_distance
+        assert abs(findEuclideanDistance(embedding, embedding_cache[nearest]) - distance) < 0.000001
+        # import pdb; pdb.set_trace()
+
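The test above pins Annoy's reported neighbour distances to the same Euclidean metric that findEuclideanDistance computes directly. A minimal, self-contained sketch of that round-trip, using made-up 4-dimensional vectors in place of real 128-dimensional FaceNet embeddings (the data and dimension here are illustrative only):

    import math

    from annoy import AnnoyIndex

    train = [[0.0, 0.1, 0.2, 0.3], [0.9, 0.8, 0.7, 0.6]]  # toy stand-ins for embeddings

    index = AnnoyIndex(4, 'euclidean')
    for item_id, vector in enumerate(train):
        index.add_item(item_id, vector)
    index.build(3)  # same tree count as the test above

    query = [0.05, 0.1, 0.2, 0.3]
    ids, distances = index.get_nns_by_vector(query, 1, include_distances=True)

    # Annoy's 'euclidean' distance agrees with a direct computation to within
    # float noise, which is what the final assertion in the test checks.
    manual = math.sqrt(sum((a - b) ** 2 for a, b in zip(query, train[ids[0]])))
    assert abs(distances[0] - manual) < 1e-6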
From d7262196f857592a35269bd21dd0429481a4d198 Mon Sep 17 00:00:00 2001
From: Damian Moore
Date: Wed, 2 Jun 2021 18:07:11 +0100
Subject: [PATCH 038/110] Refactors face model, better tests, better
 performance (model caching)

---
 photonix/classifiers/face/__init__.py         |   2 +-
 photonix/classifiers/face/model.py            | 233 +++++++++++++-----
 photonix/classifiers/runners.py               |  13 +-
 .../commands/classification_face_processor.py |   5 +-
 .../commands/retrain_face_similarity_index.py |  56 +----
 tests/test_classifier_faces.py                |  51 ----
 tests/test_classifier_models.py               |  77 +++++-
 ui/src/components/BoundingBoxes.js            |   2 +
 8 files changed, 264 insertions(+), 175 deletions(-)
 delete mode 100644 tests/test_classifier_faces.py

diff --git a/photonix/classifiers/face/__init__.py b/photonix/classifiers/face/__init__.py
index fbc53f82..25b2240f 100644
--- a/photonix/classifiers/face/__init__.py
+++ b/photonix/classifiers/face/__init__.py
@@ -1 +1 @@
-from .model import FaceDetectionModel, run_on_photo
+from .model import FaceModel, run_on_photo
diff --git a/photonix/classifiers/face/model.py b/photonix/classifiers/face/model.py
index dd9117c7..a2b4290a 100644
--- a/photonix/classifiers/face/model.py
+++ b/photonix/classifiers/face/model.py
@@ -16,101 +16,208 @@
 from photonix.classifiers.face.deepface import DeepFace
 from photonix.classifiers.face.mtcnn import MTCNN
 from photonix.classifiers.face.deepface.commons.distance import findEuclideanDistance
+from photonix.classifiers.face.deepface.DeepFace import build_model

 GRAPH_FILE = os.path.join('face', 'mtcnn_weights.npy')
 DISTANCE_THRESHOLD = 10


-class FaceDetectionModel(BaseModel):
+class FaceModel(BaseModel):
     name = 'face'
     version = 20210528
+    retrained_version = 0
+    library_id = None
     approx_ram_mb = 600
     max_num_workers = 1

-    def __init__(self, model_dir=None, graph_file=GRAPH_FILE, lock_name=None):
+    def __init__(self, model_dir=None, graph_file=GRAPH_FILE, library_id=None, lock_name=None):
         super().__init__(model_dir=model_dir)
+        self.library_id = library_id

         graph_file = os.path.join(self.model_dir, graph_file)

         if self.ensure_downloaded(lock_name=lock_name):
             self.graph = self.load_graph(graph_file)

+
     def load_graph(self, graph_file):
         r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1'))
         with Lock(r, 'classifier_{}_load_graph'.format(self.name)):
-            if self.graph_cache_key in self.graph_cache:
-                return self.graph_cache[self.graph_cache_key]
+            # Load MTCNN
+            mtcnn_graph = None
+            mtcnn_key = f'{self.graph_cache_key}:mtcnn'
+            if mtcnn_key in self.graph_cache:
+                mtcnn_graph = self.graph_cache[mtcnn_key]
+            else:
+                mtcnn_graph = MTCNN(weights_file=graph_file)
+                self.graph_cache[mtcnn_key] = mtcnn_graph
+
+            # Load Facenet
+            facenet_graph = None
+            facenet_key = f'{self.graph_cache_key}:facenet'
+            if facenet_key in self.graph_cache:
+                facenet_graph = self.graph_cache[facenet_key]
+            else:
+                facenet_graph = build_model('Facenet')
+                self.graph_cache[facenet_key] = facenet_graph

-            graph = MTCNN(weights_file=graph_file)
+            # Store version number of retrained model (ANN) if it has been computed
+            self.reload_retrained_model_version()

-            self.graph_cache[self.graph_cache_key] = graph
-            return graph
+            return {
+                'mtcnn': mtcnn_graph,
+                'facenet': facenet_graph,
+            }

     def predict(self, image_file, min_score=0.99):
+        # Detects face bounding boxes
         image = Image.open(image_file)
         image = np.asarray(image)
-        results = self.graph.detect_faces(image)
+        results = self.graph['mtcnn'].detect_faces(image)
         return list(filter(lambda f: f['confidence'] > min_score, results))

+    def crop(self, image_data, box):
+        return image_data.crop([
+            max(box[0]-int(box[2]*0.3), 0),
+            max(box[1]-int(box[3]*0.3), 0),
+            min(box[0]+box[2]+int(box[2]*0.3), image_data.width),
+            min(box[1]+box[3]+int(box[3]*0.3), image_data.height)
+        ])

-def find_closest_face_tag(library_id, source_embedding):
-    # Use ANN index to do quick serach if it has been trained by retrain_face_similarity_index
-    from django.conf import settings
-    ann_path = Path(settings.MODEL_DIR) / 'face' / 'faces.ann'
-    tag_ids_path = Path(settings.MODEL_DIR) / 'face' / 'faces_tag_ids.json'
+    def get_face_embedding(self, image_data):
+        return DeepFace.represent(np.asarray(image_data), model_name='Facenet', model=self.graph['facenet'])
+
+    def find_closest_face_tag_by_ann(self, source_embedding):
+        # Use ANN index to do a quick search if it has been trained by retrain_face_similarity_index
+        from django.conf import settings
+        ann_path = Path(settings.MODEL_DIR) / 'face' / f'faces_{self.library_id}.ann'
+        tag_ids_path = Path(settings.MODEL_DIR) / 'face' / f'faces_tag_ids_{self.library_id}.json'
+
+        if os.path.exists(ann_path) and os.path.exists(tag_ids_path):
+            embedding_size = 128  # FaceNet output size
+            t = AnnoyIndex(embedding_size, 'euclidean')
+            # Ensure ANN index, tag IDs and version files can't be updated while we are reading
+            r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1'))
+            with Lock(r, 'face_model_retrain'):
+                self.reload_retrained_model_version()
+                t.load(str(ann_path))
+                with open(tag_ids_path) as f:
+                    tag_ids = json.loads(f.read())
+            nearest = t.get_nns_by_vector(source_embedding, 1, include_distances=True)
+            # import pdb; pdb.set_trace()
+            return tag_ids[nearest[0][0]], nearest[1][0]
+
+        return (None, None)
+
+    def find_closest_face_tag_by_brute_force(self, source_embedding, target_data=None):
+        if not self.library_id and not target_data:
+            raise ValueError('No Library ID is set')
+
+        representations = []
+        if target_data:  # Mainly as an option for testing
+            for id, embedding in target_data:
+                representations.append((id, embedding))
+        else:
+            # Collect all previously generated embeddings
+            from photonix.photos.models import PhotoTag
+            for photo_tag in PhotoTag.objects.filter(photo__library_id=self.library_id, tag__type='F'):
+                try:
+                    tag_embedding = json.loads(photo_tag.extra_data)['facenet_embedding']
+                    representations.append((str(photo_tag.tag.id), tag_embedding))
+                except (KeyError, json.decoder.JSONDecodeError):
+                    pass
+
+        # Calculate Euclidean distances
+        distances = []
+        for (_, target_embedding) in representations:
+            distance = findEuclideanDistance(source_embedding, target_embedding)
+            distances.append(distance)
+
+        # Return closest match and distance value
+        if not distances:  # First face added has nothing to compare to
+            return (None, 999)
+        candidate_idx = np.argmin(distances)
+        return (representations[candidate_idx][0], distances[candidate_idx])
+
+    def find_closest_face_tag(self, source_embedding):
+        if not self.library_id:
+            raise ValueError('No Library ID is set')
+
+        ann_nearest, ann_distance = self.find_closest_face_tag_by_ann(source_embedding)
+        if ann_nearest:
+            return ann_nearest, ann_distance
+
+        brute_force_nearest, brute_force_distance = self.find_closest_face_tag_by_brute_force(source_embedding)
+        return brute_force_nearest, brute_force_distance
+
+    def retrain_face_similarity_index(self, training_data=None):
+        if not self.library_id and not training_data:
+            raise ValueError('No Library ID is set')
+
+        from django.conf import settings
+        from photonix.photos.models import PhotoTag
+        ann_path = Path(settings.MODEL_DIR) / 'face' / f'faces_{self.library_id}.ann'
+        tag_ids_path = Path(settings.MODEL_DIR) / 'face' / f'faces_tag_ids_{self.library_id}.json'
+        version_file = Path(settings.MODEL_DIR) / 'face' / f'retrained_version_{self.library_id}.txt'

-    if os.path.exists(ann_path) and os.path.exists(tag_ids_path):
         embedding_size = 128  # FaceNet output size
         t = AnnoyIndex(embedding_size, 'euclidean')
-        # Ensure ANN index, tag IDs and version files can't be updated while we are reading
+        retrained_version = datetime.utcnow().strftime('%Y%m%d%H%M%S')
+
+        tag_ids = []
+        if training_data:  # Mainly as an option for testing
+            for id, embedding in training_data:
+                t.add_item(len(tag_ids), embedding)
+                tag_ids.append(id)
+        else:
+            for photo_tag in PhotoTag.objects.filter(tag__type='F').order_by('id'):
+                try:
+                    extra_data = json.loads(photo_tag.extra_data)
+                    embedding = extra_data['facenet_embedding']
+                    t.add_item(len(tag_ids), embedding)
+                    tag_ids.append(str(photo_tag.tag.id))
+                except (json.decoder.JSONDecodeError, KeyError, TypeError):
+                    pass
+
+        # Build the ANN index
+        t.build(3)  # Number of random forest trees
+
+        # Acquire lock to save ANN, tag IDs and version files atomically
         r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1'))
         with Lock(r, 'face_model_retrain'):
-            t.load(str(ann_path))
-            with open(tag_ids_path) as f:
-                tag_ids = json.loads(f.read())
-        nearest = t.get_nns_by_vector(source_embedding, 1, include_distances=True)
-        return tag_ids[nearest[0][0]], nearest[1][0]
-
-    # Collect all previously generated embeddings
-    from photonix.photos.models import PhotoTag
-    representations = []
-    for photo_tag in PhotoTag.objects.filter(photo__library_id=library_id, tag__type='F'):
-        try:
-            tag_embedding = json.loads(photo_tag.extra_data)['facenet_embedding']
-            representations.append((str(photo_tag.tag.id), tag_embedding))
-        except (KeyError, json.decoder.JSONDecodeError):
-            pass
-
-    # Calculate Euclidean distances
-    distances = []
-    for (_, target_embedding) in representations:
-        distance = findEuclideanDistance(source_embedding, target_embedding)
-        distances.append(distance)
-
-    # Return closest match and distance value
-    if not distances:  # First face has nothing to compare to
-        return (None, 999)
-    candidate_idx = np.argmin(distances)
-    return (representations[candidate_idx][0], distances[candidate_idx])
-
-
-def get_retrained_model_version():
-    from django.conf import settings
-    version_file = Path(settings.MODEL_DIR) / 'face' / 'retrained_version.txt'
-    version_date = None
-    if os.path.exists(version_file):
-        with open(version_file) as f:
-            contents = f.read().strip()
-            version_date = datetime.strptime(contents, '%Y%m%d%H%M%S').replace(tzinfo=timezone.utc)
-        return int(version_date.strftime('%Y%m%d%H%M%S'))
-    return 0
+            # Save ANN index
+            t.save(str(ann_path))
+
+            # Save Tag IDs to JSON file as Annoy only supports integer IDs so we have to do the mapping ourselves
+            with open(tag_ids_path, 'w') as f:
+                f.write(json.dumps(tag_ids))
+
+            # Save version of retrained model to text file - used to save against on PhotoTag model and to determine whether retraining is required
+            with open(version_file, 'w') as f:
+                f.write(retrained_version)
+
+    def reload_retrained_model_version(self):
+        if self.library_id:
+            from django.conf import settings
+            version_file = Path(settings.MODEL_DIR) / 'face' / f'retrained_version_{self.library_id}.txt'
+            version_date = None
+            if os.path.exists(version_file):
+                with open(version_file) as f:
+                    contents = f.read().strip()
+                    version_date = datetime.strptime(contents, '%Y%m%d%H%M%S').replace(tzinfo=timezone.utc)
+                self.retrained_version = int(version_date.strftime('%Y%m%d%H%M%S'))
+                return self.retrained_version
+        return 0


 def run_on_photo(photo_id):
-    model = FaceDetectionModel()
     sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
-    from photonix.classifiers.runners import results_for_model_on_photo, get_or_create_tag
+    from photonix.classifiers.runners import get_photo_by_any_type, results_for_model_on_photo, get_or_create_tag
+
+    photo = get_photo_by_any_type(photo_id)
+    model = FaceModel(library_id=photo and photo.library_id)
+
     # Detect all faces in an image
     photo, results = results_for_model_on_photo(model, photo_id)

@@ -118,25 +225,21 @@ def run_on_photo(photo_id):
     path = photo_id
     if photo:
         path = photo.base_image_path
+        model.library_id = photo.library_id
     image_data = Image.open(path)

     # Loop over each face that was detected above
     for result in results:
         # Crop individual face + 30% extra in each direction
         box = result['box']
-        face_image = image_data.crop([
-            max(box[0]-int(box[2]*0.3), 0),
-            max(box[1]-int(box[3]*0.3), 0),
-            min(box[0]+box[2]+int(box[2]*0.3), image_data.width),
-            min(box[1]+box[3]+int(box[3]*0.3), image_data.height)
-        ])
+        face_image = model.crop(image_data, box)

         # Generate embedding with Facenet
         try:
-            embedding = DeepFace.represent(np.asarray(face_image), model_name='Facenet')
+            embedding = model.get_face_embedding(face_image)
             # Add it to the results
             result['embedding'] = embedding
             if photo:
-                closest_tag, closest_distance = find_closest_face_tag(photo.library, embedding)
+                closest_tag, closest_distance = model.find_closest_face_tag(embedding)
                 if closest_tag:
                     print(f'Closest tag: {closest_tag}')
                     print(f'Closest distance: {closest_distance}')
@@ -165,7 +268,7 @@ def run_on_photo(photo_id):

             if 'embedding' in result:
                 extra_data = json.dumps({'facenet_embedding': result['embedding']})
-            PhotoTag(photo=photo, tag=tag, source='F', confidence=score, significance=score, position_x=x, position_y=y, size_x=width, size_y=height, model_version=model.version, retrained_model_version=get_retrained_model_version(), extra_data=extra_data).save()
+            PhotoTag(photo=photo, tag=tag, source='F', confidence=score, significance=score, position_x=x, position_y=y, size_x=width, size_y=height, model_version=model.version, retrained_model_version=model.retrained_version, extra_data=extra_data).save()
         photo.classifier_color_completed_at = timezone.now()
         photo.classifier_color_version = getattr(model, 'version', 0)
         photo.save()
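The "model caching" headline of this patch lands in load_graph above: both networks are looked up in BaseModel's graph cache under distinct keys before being built. A stripped-down sketch of that pattern, assuming (as the diff implies but does not show) that graph_cache behaves like a process-wide dict and graph_cache_key is a per-model string:

    import os

    import redis
    from redis_lock import Lock

    graph_cache = {}  # stand-in for BaseModel.graph_cache


    def load_cached(cache_key, build):
        # Build an expensive graph at most once per process, serialising the
        # build across workers with the same Redis lock pattern as load_graph
        r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1'))
        with Lock(r, f'classifier_load_{cache_key}'):
            if cache_key not in graph_cache:
                graph_cache[cache_key] = build()
        return graph_cache[cache_key]

    # e.g. mtcnn = load_cached('face:mtcnn', lambda: MTCNN(weights_file=graph_file))
    # and  facenet = load_cached('face:facenet', lambda: build_model('Facenet'))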
diff --git a/photonix/classifiers/runners.py b/photonix/classifiers/runners.py
index 98015280..f091d949 100644
--- a/photonix/classifiers/runners.py
+++ b/photonix/classifiers/runners.py
@@ -1,5 +1,6 @@
 import os
 import re
+from time import sleep
 from uuid import UUID


@@ -18,7 +19,7 @@ def get_or_create_tag(library, name, type, source, parent=None, ordering=None):
     return tag


-def results_for_model_on_photo(model, photo_id):
+def get_photo_by_any_type(photo_id):
     is_photo_instance = False
     photo = None

@@ -30,8 +31,6 @@
     elif hasattr(photo_id, 'id'):
         photo = photo_id

-    # import pdb; pdb.set_trace()
-
-    # Is an individual filename so return the prediction
-    if not is_photo_instance:
-        return None, model.predict(photo_id)
+    # Is an individual filename so there is no Photo instance to return
+    if not is_photo_instance:
+        return None

@@ -47,6 +46,10 @@
         from photonix.photos.models import Photo
         photo = Photo.objects.get(id=photo_id)

-    results = model.predict(photo.base_image_path)
+    return is_photo_instance and photo or None

-    return is_photo_instance and photo or None, results
+
+def results_for_model_on_photo(model, photo_id):
+    photo = get_photo_by_any_type(photo_id)
+    # An individual filename was given rather than a Photo, so predict on it directly
+    if not photo:
+        return None, model.predict(photo_id)
+    results = model.predict(photo.base_image_path)
+    return photo, results
diff --git a/photonix/photos/management/commands/classification_face_processor.py b/photonix/photos/management/commands/classification_face_processor.py
index 04364888..0386c66a 100644
--- a/photonix/photos/management/commands/classification_face_processor.py
+++ b/photonix/photos/management/commands/classification_face_processor.py
@@ -1,11 +1,10 @@
 from django.core.management.base import BaseCommand

 # Pre-load the model graphs so it doesn't have to be done for each job
-from photonix.classifiers.face import FaceDetectionModel, run_on_photo
+from photonix.classifiers.face import run_on_photo
 from photonix.photos.utils.classification import ThreadedQueueProcessor

-print('Loading face model')
-model = FaceDetectionModel()
+model = None


 class Command(BaseCommand):
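With the runner split into get_photo_by_any_type and results_for_model_on_photo, run_on_photo keeps accepting either a plain file path or a Photo ID. A rough smoke-test sketch, assuming the face model weights are downloaded and 'face.jpg' is a hypothetical image on disk:

    from photonix.classifiers.face import run_on_photo

    # With a bare path there is no Photo instance, so no library-specific tag
    # lookup happens; only detection boxes and Facenet embeddings come back.
    photo, results = run_on_photo('face.jpg')
    assert photo is None
    for result in results:
        print(result['box'], 'embedding' in result)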
diff --git a/photonix/photos/management/commands/retrain_face_similarity_index.py b/photonix/photos/management/commands/retrain_face_similarity_index.py
index 431435ee..b3fecf26 100644
--- a/photonix/photos/management/commands/retrain_face_similarity_index.py
+++ b/photonix/photos/management/commands/retrain_face_similarity_index.py
@@ -4,22 +4,18 @@
 from pathlib import Path
 from time import time

-from annoy import AnnoyIndex
 from django.conf import settings
 from django.core.management.base import BaseCommand
 from django.utils import timezone
-import redis
-from redis_lock import Lock

-from photonix.photos.models import PhotoTag
+from photonix.photos.models import Library, PhotoTag
+from photonix.classifiers.face.model import FaceModel


 class Command(BaseCommand):
     help = 'Creates Approximate Nearest Neighbour (ANN) search index for quickly finding closest face without having to compare one-by-one.'

     def retrain_face_similarity_index(self):
-        ann_path = Path(settings.MODEL_DIR) / 'face' / 'faces.ann'
-        tag_ids_path = Path(settings.MODEL_DIR) / 'face' / 'faces_tag_ids.json'
         version_file = Path(settings.MODEL_DIR) / 'face' / 'retrained_version.txt'
         version_date = None

@@ -28,48 +24,16 @@
             contents = f.read().strip()
             version_date = datetime.strptime(contents, '%Y%m%d%H%M%S').replace(tzinfo=timezone.utc)

-        if version_date and PhotoTag.objects.filter(updated_at__gt=version_date).count() == 0:
-            print('No new PhotoTags so no point in updating face ANN index')
-            return
+        for library in Library.objects.all():
+            start = time()
+            print(f'Updating ANN index for Library {library.id}')
+            if version_date and PhotoTag.objects.filter(updated_at__gt=version_date).count() == 0:
+                print('  No new PhotoTags in Library so no point in updating face ANN index')
+                continue

-        start = time()
+            FaceModel(library_id=library.id).retrain_face_similarity_index()

-        if PhotoTag.objects.filter(tag__type='F').count() < 10:
-            print('Not enough face tags to warrant building face ANN index')
-            try:
-                os.remove(ann_path)
-                os.remove(tag_ids_path)
-            except:
-                pass
-            return
-
-        embedding_size = 128  # FaceNet output size
-        t = AnnoyIndex(embedding_size, 'euclidean')
-        tag_ids = []
-        for photo_tag in PhotoTag.objects.filter(tag__type='F'):
-            extra_data = json.loads(photo_tag.extra_data)
-            embedding = extra_data['facenet_embedding']
-            t.add_item(len(tag_ids), embedding)
-            tag_ids.append(str(photo_tag.tag.id))
-
-        # Build the ANN index
-        t.build(3)  # Number of random forest trees
-
-        # Aquire lock to save ANN, tag IDs and version files atomically
-        r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1'))
-        with Lock(r, 'face_model_retrain'):
-            # Save ANN index
-            t.save(str(ann_path))
-
-            # Save Tag IDs to JSON file as Annoy only supports integer IDs so we have to do the mapping ourselves
-            with open(tag_ids_path, 'w') as f:
-                f.write(json.dumps(tag_ids))
-
-            # Save version of retrained model to text file - used to save against on PhotoTag model and to determine whether retraining is required
-            with open(version_file, 'w') as f:
-                f.write(datetime.utcnow().strftime('%Y%m%d%H%M%S'))
-
-        print(f'Face ANN index updated in {(time() - start):.3f}s')
+            print(f'  Completed in {(time() - start):.3f}s')

     def handle(self, *args, **options):
         self.retrain_face_similarity_index()
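Retraining is now library-scoped and lives on FaceModel, with the management command reduced to a thin loop over libraries. One way the retraining could be triggered programmatically, e.g. from a scheduled job, assuming Django settings are already configured for the project:

    from django.core.management import call_command

    # Equivalent to running the management command from the shell; it iterates
    # every Library and rebuilds that library's faces_<id>.ann index as needed.
    call_command('retrain_face_similarity_index')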
embedding_cache.append(embedding) - t.add_item(i, embedding) - - t.build(3) # Number of random forest trees - - for i, (fn, expected_nearest, expected_distance), in enumerate(TEST_FACES): - path = str(Path(__file__).parent / 'photos' / 'faces' / fn) - image_data = Image.open(path) - embedding = DeepFace.represent(np.asarray(image_data), model_name='Facenet') - nearest, distance = t.get_nns_by_vector(embedding, 1, include_distances=True) - nearest = nearest[0] - distance = distance[0] - - assert nearest == expected_nearest - assert '{:.3f}'.format(distance) == expected_distance - assert abs(findEuclideanDistance(embedding, embedding_cache[nearest]) - distance) < 0.000001 - # import pdb; pdb.set_trace() - diff --git a/tests/test_classifier_models.py b/tests/test_classifier_models.py index 76694a8c..897da80a 100644 --- a/tests/test_classifier_models.py +++ b/tests/test_classifier_models.py @@ -3,6 +3,9 @@ from datetime import datetime from pathlib import Path +from django.conf import settings +from PIL import Image + def test_downloading(tmpdir): from photonix.classifiers.style.model import StyleModel @@ -43,19 +46,19 @@ def test_location_predict(): assert result['city']['population'] == 7556900 # In the sea near Oia, Santorini, Greece - Country is inferred from city - result = model.predict(location=[36.4396445,25.3560936]) + result = model.predict(location=[36.4396445, 25.3560936]) assert result['country']['name'] == 'Greece' assert result['city']['name'] == 'Oía' assert result['city']['distance'] == 3132 assert result['city']['population'] == 3376 # Too far off the coast of John o' Groats, Scotland, UK - No match - result = model.predict(location=[58.6876742,-3.4206862]) + result = model.predict(location=[58.6876742, -3.4206862]) assert result['country'] == None assert result['city'] == None # Vernier, Switzerland - Tests country code mainly (CH can be China in some codings) - result = model.predict(location=[46.1760906,5.9929043]) + result = model.predict(location=[46.1760906, 5.9929043]) assert result['country']['name'] == 'Switzerland' assert result['country']['code'] == 'CH' assert result['city']['country_name'] == 'Switzerland' @@ -74,7 +77,6 @@ def test_object_predict(): model = ObjectModel() snow = str(Path(__file__).parent / 'photos' / 'snow.jpg') result = model.predict(snow) -# import pdb; pdb.set_trace() assert len(result) == 3 @@ -105,3 +107,70 @@ def test_style_predict(): assert len(result) == 1 assert result[0][0] == 'serene' assert '{0:.3f}'.format(result[0][1]) == '0.962' + + +def test_face_predict(): + from photonix.classifiers.face.model import FaceModel + from photonix.classifiers.face.deepface.commons.distance import findEuclideanDistance + + TRAIN_FACES = [ + 'Boris_Becker_0003.jpg', + 'Boris_Becker_0004.jpg', + 'David_Beckham_0001.jpg', + 'David_Beckham_0002.jpg', + ] + TEST_FACES = [ + # Test image, nearest match in TRAIN_FACES, distance (3DP) + ('Boris_Becker_0005.jpg', 0, '9.614'), + ('David_Beckham_0010.jpg', 2, '10.956'), + ('Barbara_Becker_0001.jpg', 2, '15.736'), + ] + + embedding_cache = [] + model = FaceModel() + model.library_id = '00000000-0000-0000-0000-000000000000' + + # Calculate embeddings for training faces + for fn in TRAIN_FACES: + path = str(Path(__file__).parent / 'photos' / 'faces' / fn) + image_data = Image.open(path) + embedding = model.get_face_embedding(image_data) + embedding_cache.append(embedding) + + training_data = [(i, embedding) for i, embedding in enumerate(embedding_cache)] + + # Compare test faces using brute force Euclidian 
calculations + for fn, expected_nearest, expected_distance in TEST_FACES: + path = str(Path(__file__).parent / 'photos' / 'faces' / fn) + image_data = Image.open(path) + embedding = model.get_face_embedding(image_data) + nearest, distance = model.find_closest_face_tag_by_brute_force(embedding, target_data=training_data) + + assert nearest == expected_nearest + assert '{:.3f}'.format(distance) == expected_distance + assert findEuclideanDistance(embedding, embedding_cache[nearest]) == distance + + # Train ANN index + model.retrain_face_similarity_index(training_data=training_data) + + # Compare test faces using ANN trained index + for fn, expected_nearest, expected_distance in TEST_FACES: + path = str(Path(__file__).parent / 'photos' / 'faces' / fn) + image_data = Image.open(path) + embedding = model.get_face_embedding(image_data) + nearest, distance = model.find_closest_face_tag_by_ann(embedding) + + assert nearest == expected_nearest + assert '{:.3f}'.format(distance) == expected_distance + assert abs(findEuclideanDistance(embedding, embedding_cache[nearest]) - distance) < 0.000001 + + # Tidy up ANN model training + for fn in [ + f'faces_{model.library_id}.ann', + f'faces_tag_ids_{model.library_id}.json', + f'retrained_version_{model.library_id}.txt', + ]: + try: + os.remove(Path(settings.MODEL_DIR) / 'face' / fn) + except: + pass diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index 1e695e30..674bb308 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -22,6 +22,7 @@ const Container = styled('div')` text-align: left; white-space: nowrap; pointer-events: all; + border-radius: 3px 3px 0 0; &:hover { overflow: visible; text-shadow: 0 0 2px #f00; @@ -45,6 +46,7 @@ const Container = styled('div')` .FeatureLabel { font-size: 8px; padding: 0 3px 1px 3px; + border-radius: 5px 5px 0 0; } } } From d706c4f9d22279379c2ab46227520a8d4d32c5a8 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Wed, 2 Jun 2021 18:33:52 +0100 Subject: [PATCH 039/110] ANN now runs first and the brute force fills in comparing photos that were added since last models retraining --- photonix/classifiers/face/model.py | 32 ++++++++++++++++++++---------- 1 file changed, 22 insertions(+), 10 deletions(-) diff --git a/photonix/classifiers/face/model.py b/photonix/classifiers/face/model.py index a2b4290a..83e16503 100644 --- a/photonix/classifiers/face/model.py +++ b/photonix/classifiers/face/model.py @@ -105,12 +105,11 @@ def find_closest_face_tag_by_ann(self, source_embedding): with open(tag_ids_path) as f: tag_ids = json.loads(f.read()) nearest = t.get_nns_by_vector(source_embedding, 1, include_distances=True) - # import pdb; pdb.set_trace() return tag_ids[nearest[0][0]], nearest[1][0] - return (None, None) + return (None, 999) - def find_closest_face_tag_by_brute_force(self, source_embedding, target_data=None): + def find_closest_face_tag_by_brute_force(self, source_embedding, oldest_date=None, target_data=None): if not self.library_id and not target_data: raise ValueError('No Library ID is set') @@ -121,7 +120,10 @@ def find_closest_face_tag_by_brute_force(self, source_embedding, target_data=Non else: # Collect all previously generated embeddings from photonix.photos.models import PhotoTag - for photo_tag in PhotoTag.objects.filter(photo__library_id=self.library_id, tag__type='F'): + photo_tags = PhotoTag.objects.filter(photo__library_id=self.library_id, tag__type='F') + if oldest_date: + photo_tags = photo_tags.filter(created_at__gt=oldest_date) + for 
photo_tag in photo_tags: try: tag_embedding = json.loads(photo_tag.extra_data)['facenet_embedding'] representations.append((str(photo_tag.tag.id), tag_embedding)) @@ -137,19 +139,25 @@ def find_closest_face_tag_by_brute_force(self, source_embedding, target_data=Non # Return closest match and distance value if not distances: # First face added has nothing to compare to return (None, 999) - candidate_idx = np.argmin(distances) - return (representations[candidate_idx][0], distances[candidate_idx]) + index = np.argmin(distances) + return (representations[index][0], distances[index]) def find_closest_face_tag(self, source_embedding): if not self.library_id: raise ValueError('No Library ID is set') ann_nearest, ann_distance = self.find_closest_face_tag_by_ann(source_embedding) - if ann_nearest: - return ann_nearest, ann_distance - brute_force_nearest, brute_force_distance = self.find_closest_face_tag_by_brute_force(source_embedding) - return brute_force_nearest, brute_force_distance + oldest_date = None + if self.retrained_version: + oldest_date = datetime.strptime(str(self.retrained_version), '%Y%m%d%H%M%S').replace(tzinfo=timezone.utc) + + brute_force_nearest, brute_force_distance = self.find_closest_face_tag_by_brute_force(source_embedding, oldest_date=oldest_date) + + if ann_nearest and ann_distance < brute_force_distance: + return ann_nearest, ann_distance + else: + return brute_force_nearest, brute_force_distance def retrain_face_similarity_index(self, training_data=None): if not self.library_id and not training_data: @@ -251,13 +259,17 @@ def run_on_photo(photo_id): if photo: from django.utils import timezone from photonix.photos.models import Tag, PhotoTag + photo.clear_tags(source='C', type='F') for result in results: + # Use matched tag if within distance threshold if result.get('closest_distance', 999) < DISTANCE_THRESHOLD: tag = Tag.objects.get(id=result['closest_tag'], library=photo.library, type='F') print(f'MATCHED {tag.name}') + # Otherwise create new tag else: tag = get_or_create_tag(library=photo.library, name=f'Unknown person {randint(0, 999999):06d}', type='F', source='C') + x = (result['box'][0] + (result['box'][2] / 2)) / photo.base_file.width y = (result['box'][1] + (result['box'][3] / 2)) / photo.base_file.height width = result['box'][2] / photo.base_file.width From d2209e24a86909bfe3d6df96de14d594fe6ed94e Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Thu, 3 Jun 2021 15:43:41 +0100 Subject: [PATCH 040/110] Fixes for prd Dockerfile and allow PhotoTags to be marked as deleted --- Makefile | 3 +++ docker/Dockerfile.prd | 3 ++- photonix/classifiers/face/model.py | 12 ++++++------ ...529_1244.py => 0008_auto_20210603_1442.py} | 19 +++++++++++++++++-- .../migrations/0008_phototag_extra_data.py | 18 ------------------ .../migrations/0010_alter_photo_flash.py | 18 ------------------ photonix/photos/models.py | 5 +++++ 7 files changed, 33 insertions(+), 45 deletions(-) rename photonix/photos/migrations/{0009_auto_20210529_1244.py => 0008_auto_20210603_1442.py} (55%) delete mode 100644 photonix/photos/migrations/0008_phototag_extra_data.py delete mode 100644 photonix/photos/migrations/0010_alter_photo_flash.py diff --git a/Makefile b/Makefile index a19ca349..9443022b 100644 --- a/Makefile +++ b/Makefile @@ -19,6 +19,9 @@ restart: shell: $(DOCKER_COMPOSE_DEV) exec photonix bash +shell-prd: + $(DOCKER_COMPOSE_PRD) exec photonix bash + manage: $(DOCKER_COMPOSE_DEV) exec photonix python photonix/manage.py ${} diff --git a/docker/Dockerfile.prd b/docker/Dockerfile.prd index 
25fe0948..f8ddb971 100644 --- a/docker/Dockerfile.prd +++ b/docker/Dockerfile.prd @@ -86,7 +86,6 @@ RUN rm -rf \ /usr/local/lib/python3.8/site-packages/matplotlib/mpl-data/sample_data \ /usr/local/lib/python3.8/site-packages/matplotlib/mpl-data/images \ /usr/local/lib/python3.8/site-packages/matplotlib/mpl-data/stylelib \ - /usr/local/lib/python3.8/site-packages/h5py \ /usr/local/lib/python3.8/site-packages/tensorboard \ /usr/local/lib/python3.8/site-packages/tensorboard_plugin_wit @@ -100,6 +99,7 @@ RUN apt-get update && \ libatlas3-base \ libfreetype6 \ libfreetype6-dev \ + libgl1 \ libglib2.0-dev \ libimage-exiftool-perl \ libpq-dev \ @@ -107,6 +107,7 @@ RUN apt-get update && \ netcat \ nginx-light \ supervisor \ + xz-utils \ && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* \ diff --git a/photonix/classifiers/face/model.py b/photonix/classifiers/face/model.py index 83e16503..e8027f0a 100644 --- a/photonix/classifiers/face/model.py +++ b/photonix/classifiers/face/model.py @@ -91,8 +91,8 @@ def get_face_embedding(self, image_data): def find_closest_face_tag_by_ann(self, source_embedding): # Use ANN index to do quick serach if it has been trained by retrain_face_similarity_index from django.conf import settings - ann_path = Path(settings.MODEL_DIR) / 'face' / f'faces_{self.library_id}.ann' - tag_ids_path = Path(settings.MODEL_DIR) / 'face' / f'faces_tag_ids_{self.library_id}.json' + ann_path = Path(settings.MODEL_DIR) / 'face' / f'{self.library_id}_faces.ann' + tag_ids_path = Path(settings.MODEL_DIR) / 'face' / f'{self.library_id}_faces_tag_ids.json' if os.path.exists(ann_path) and os.path.exists(tag_ids_path): embedding_size = 128 # FaceNet output size @@ -165,9 +165,9 @@ def retrain_face_similarity_index(self, training_data=None): from django.conf import settings from photonix.photos.models import PhotoTag - ann_path = Path(settings.MODEL_DIR) / 'face' / f'faces_{self.library_id}.ann' - tag_ids_path = Path(settings.MODEL_DIR) / 'face' / f'faces_tag_ids_{self.library_id}.json' - version_file = Path(settings.MODEL_DIR) / 'face' / f'retrained_version_{self.library_id}.txt' + ann_path = Path(settings.MODEL_DIR) / 'face' / f'{self.library_id}_faces.ann' + tag_ids_path = Path(settings.MODEL_DIR) / 'face' / f'{self.library_id}_faces_tag_ids.json' + version_file = Path(settings.MODEL_DIR) / 'face' / f'{self.library_id}_retrained_version.txt' embedding_size = 128 # FaceNet output size t = AnnoyIndex(embedding_size, 'euclidean') @@ -208,7 +208,7 @@ def retrain_face_similarity_index(self, training_data=None): def reload_retrained_model_version(self): if self.library_id: from django.conf import settings - version_file = Path(settings.MODEL_DIR) / 'face' / f'retrained_version_{self.library_id}.txt' + version_file = Path(settings.MODEL_DIR) / 'face' / f'{self.library_id}_retrained_version.txt' version_date = None if os.path.exists(version_file): with open(version_file) as f: diff --git a/photonix/photos/migrations/0009_auto_20210529_1244.py b/photonix/photos/migrations/0008_auto_20210603_1442.py similarity index 55% rename from photonix/photos/migrations/0009_auto_20210529_1244.py rename to photonix/photos/migrations/0008_auto_20210603_1442.py index 7b71123e..96aa52ee 100644 --- a/photonix/photos/migrations/0009_auto_20210529_1244.py +++ b/photonix/photos/migrations/0008_auto_20210603_1442.py @@ -1,4 +1,4 @@ -# Generated by Django 3.2.3 on 2021-05-29 12:44 +# Generated by Django 3.2.3 on 2021-06-03 14:42 from django.db import migrations, models @@ -6,15 +6,30 @@ class 
Migration(migrations.Migration): dependencies = [ - ('photos', '0008_phototag_extra_data'), + ('photos', '0007_add_library_ForeignKey'), ] operations = [ + migrations.AddField( + model_name='phototag', + name='deleted', + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name='phototag', + name='extra_data', + field=models.TextField(null=True), + ), migrations.AddField( model_name='phototag', name='retrained_model_version', field=models.PositiveBigIntegerField(default=0, help_text='If classifier has models that are re-trained locally (e.g. Face) then we want to store this too (YYYYMMDDHHMMSS)'), ), + migrations.AlterField( + model_name='photo', + name='flash', + field=models.BooleanField(null=True), + ), migrations.AlterField( model_name='phototag', name='model_version', diff --git a/photonix/photos/migrations/0008_phototag_extra_data.py b/photonix/photos/migrations/0008_phototag_extra_data.py deleted file mode 100644 index 6c1e6823..00000000 --- a/photonix/photos/migrations/0008_phototag_extra_data.py +++ /dev/null @@ -1,18 +0,0 @@ -# Generated by Django 3.0.14 on 2021-05-27 13:38 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ('photos', '0007_add_library_ForeignKey'), - ] - - operations = [ - migrations.AddField( - model_name='phototag', - name='extra_data', - field=models.TextField(null=True), - ), - ] diff --git a/photonix/photos/migrations/0010_alter_photo_flash.py b/photonix/photos/migrations/0010_alter_photo_flash.py deleted file mode 100644 index 02bd7b2f..00000000 --- a/photonix/photos/migrations/0010_alter_photo_flash.py +++ /dev/null @@ -1,18 +0,0 @@ -# Generated by Django 3.2.3 on 2021-05-30 21:47 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ('photos', '0009_auto_20210529_1244'), - ] - - operations = [ - migrations.AlterField( - model_name='photo', - name='flash', - field=models.BooleanField(null=True), - ), - ] diff --git a/photonix/photos/models.py b/photonix/photos/models.py index 19e30e20..1b7e602b 100644 --- a/photonix/photos/models.py +++ b/photonix/photos/models.py @@ -169,7 +169,11 @@ def base_image_path(self): @property def download_url(self): library_url = self.library.get_library_path_store().url + if not library_url: + library_url = '/photos/' library_path = self.library.get_library_path_store().path + if not library_path: + library_path = '/data/photos/' return self.base_file.path.replace(library_path, library_url) @property @@ -264,6 +268,7 @@ class PhotoTag(UUIDModel, VersionedModel): size_y = models.FloatField(null=True) # A place to store extra JSON data such as face feature positions for eyes, nose and mouth extra_data = models.TextField(null=True) + deleted = models.BooleanField(default=False) class Meta: ordering = ['-significance'] From ff9e334ceb4c16cf5b6ce8078f487cbce17f3e1f Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Thu, 3 Jun 2021 18:01:39 +0100 Subject: [PATCH 041/110] Fix for download path --- photonix/photos/models.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/photonix/photos/models.py b/photonix/photos/models.py index fbb1ddcd..2749a3d0 100644 --- a/photonix/photos/models.py +++ b/photonix/photos/models.py @@ -169,7 +169,11 @@ def base_image_path(self): @property def download_url(self): library_url = self.library.get_library_path_store().url + if not library_url: + library_url = '/photos/' library_path = self.library.get_library_path_store().path + if not library_path: + 
library_path = '/data/photos/' return self.base_file.path.replace(library_path, library_url) @property From 22cad58876ad048a89416a686099637eac50fd4d Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Fri, 4 Jun 2021 19:58:24 +0100 Subject: [PATCH 042/110] Adds/fixes import and classification of events --- photonix/classifiers/color/model.py | 4 -- photonix/classifiers/event/model.py | 33 ++++--------- photonix/classifiers/location/model.py | 4 -- photonix/classifiers/object/model.py | 4 -- photonix/classifiers/style/model.py | 4 -- .../classification_event_processor.py | 21 ++++++++ .../management/commands/import_demo_photos.py | 24 ++++++---- .../migrations/0008_auto_20210604_1842.py | 48 +++++++++++++++++++ photonix/photos/models.py | 14 +++--- photonix/photos/schema.py | 37 +++++++------- photonix/photos/utils/classification.py | 1 + photonix/photos/utils/db.py | 13 ++--- system/supervisord.conf | 29 +++++++---- ui/src/components/PhotoMetadata.js | 12 +++++ ui/src/containers/PhotoDetailContainer.js | 7 +++ 15 files changed, 169 insertions(+), 86 deletions(-) create mode 100644 photonix/photos/management/commands/classification_event_processor.py create mode 100644 photonix/photos/migrations/0008_auto_20210604_1842.py diff --git a/photonix/classifiers/color/model.py b/photonix/classifiers/color/model.py index 7256e9a1..e7100419 100644 --- a/photonix/classifiers/color/model.py +++ b/photonix/classifiers/color/model.py @@ -82,15 +82,11 @@ def run_on_photo(photo_id): photo, results = results_for_model_on_photo(model, photo_id) if photo: - from django.utils import timezone from photonix.photos.models import PhotoTag photo.clear_tags(source='C', type='C') for name, score in results: tag = get_or_create_tag(library=photo.library, name=name, type='C', source='C', ordering=model.colors[name][1]) PhotoTag(photo=photo, tag=tag, source='C', confidence=score, significance=score).save() - photo.classifier_color_completed_at = timezone.now() - photo.classifier_color_version = getattr(model, 'version', 0) - photo.save() return photo, results diff --git a/photonix/classifiers/event/model.py b/photonix/classifiers/event/model.py index 7aac1303..be82592d 100644 --- a/photonix/classifiers/event/model.py +++ b/photonix/classifiers/event/model.py @@ -1,24 +1,14 @@ -import operator import sys from pathlib import Path from photonix.photos.utils.metadata import (PhotoMetadata, parse_datetime) import datetime - class EventModel: version = 20210505 approx_ram_mb = 120 max_num_workers = 2 - def __init__(self): - self.events = { - 'Christmas Day': '25 December', - 'New Year': '31st December 12:00PM to 1st January 12:00PM', - 'Halloween': '31st October', - "Valentine's Day": '14th February', - } - def predict(self, image_file): metadata = PhotoMetadata(image_file) date_taken = None @@ -28,10 +18,10 @@ def predict(self, image_file): if date_taken: events = { datetime.date(date_taken.year, 12, 25): "Christmas Day", - datetime.date(date_taken.year, 10, 31):"Halloween", - datetime.date(date_taken.year, 2, 14):"Valentine's Day", + datetime.date(date_taken.year, 10, 31): "Halloween", + datetime.date(date_taken.year, 2, 14): "Valentine's Day", datetime.date(date_taken.year, 12, 31): "New Year Start", - datetime.date(date_taken.year, 1, 1):"New Year End", + datetime.date(date_taken.year, 1, 1): "New Year End", } date_taken = datetime.datetime(date_taken.year, 12, 31, 2, 30) if events.get(date_taken.date()): @@ -39,25 +29,22 @@ def predict(self, image_file): start_of_day = 
datetime.datetime.combine(datetime.date(date_taken.year, 12, 31), datetime.datetime.min.time()) end_of_day = start_of_day + datetime.timedelta(days=1) if start_of_day <= date_taken.replace(tzinfo=None) <= end_of_day: - return "New Year" - return events.get(date_taken.date()) - return date_taken + return ['New Year'] + return [events.get(date_taken.date())] + return [] def run_on_photo(photo_id): model = EventModel() sys.path.insert(0, str(Path(__file__).resolve().parent.parent)) from photonix.classifiers.runners import results_for_model_on_photo, get_or_create_tag + photo, results = results_for_model_on_photo(model, photo_id) if photo: - from django.utils import timezone from photonix.photos.models import PhotoTag photo.clear_tags(source='C', type='E') for name in results: - tag = get_or_create_tag(library=photo.library, name=name, type='C', source='C', ordering=model.colors[name][1]) - PhotoTag(photo=photo, tag=tag, source='C', confidence=score, significance=score).save() - photo.classifier_color_completed_at = timezone.now() - photo.classifier_color_version = getattr(model, 'version', 0) - photo.save() + tag = get_or_create_tag(library=photo.library, name=name, type='E', source='C') + PhotoTag(photo=photo, tag=tag, source='C', confidence=0.5, significance=0.5).save() return photo, results @@ -69,4 +56,4 @@ def run_on_photo(photo_id): _, results = run_on_photo(sys.argv[1]) - print(results) \ No newline at end of file + print(results) diff --git a/photonix/classifiers/location/model.py b/photonix/classifiers/location/model.py index cb78fee1..47359b4f 100644 --- a/photonix/classifiers/location/model.py +++ b/photonix/classifiers/location/model.py @@ -206,7 +206,6 @@ def run_on_photo(photo_id): photo, results = results_for_model_on_photo(model, photo_id) if photo and results['country']: - from django.utils import timezone from photonix.photos.models import PhotoTag photo.clear_tags(source='C', type='L') country_tag = get_or_create_tag(library=photo.library, name=results['country']['name'], type='L', source='C') @@ -214,9 +213,6 @@ def run_on_photo(photo_id): if results['city']: city_tag = get_or_create_tag(library=photo.library, name=results['city']['name'], type='L', source='C', parent=country_tag) PhotoTag(photo=photo, tag=city_tag, source='C', confidence=0.5, significance=0.5).save() - photo.classifier_color_completed_at = timezone.now() - photo.classifier_color_version = getattr(model, 'version', 0) - photo.save() return photo, results diff --git a/photonix/classifiers/object/model.py b/photonix/classifiers/object/model.py index af4c179b..1a16ce53 100644 --- a/photonix/classifiers/object/model.py +++ b/photonix/classifiers/object/model.py @@ -132,15 +132,11 @@ def run_on_photo(photo_id): photo, results = results_for_model_on_photo(model, photo_id) if photo: - from django.utils import timezone from photonix.photos.models import PhotoTag photo.clear_tags(source='C', type='O') for result in results: tag = get_or_create_tag(library=photo.library, name=result['label'], type='O', source='C') PhotoTag(photo=photo, tag=tag, source='C', confidence=result['score'], significance=result['significance'], position_x=result['x'], position_y=result['y'], size_x=result['width'], size_y=result['height']).save() - photo.classifier_object_completed_at = timezone.now() - photo.classifier_object_version = getattr(model, 'version', 0) - photo.save() return photo, results diff --git a/photonix/classifiers/style/model.py b/photonix/classifiers/style/model.py index 7aae2e97..8dae639d 100644 --- 
a/photonix/classifiers/style/model.py +++ b/photonix/classifiers/style/model.py @@ -116,15 +116,11 @@ def run_on_photo(photo_id): photo, results = results_for_model_on_photo(model, photo_id) if photo: - from django.utils import timezone from photonix.photos.models import PhotoTag photo.clear_tags(source='C', type='S') for name, score in results: tag = get_or_create_tag(library=photo.library, name=name, type='S', source='C') PhotoTag(photo=photo, tag=tag, source='C', confidence=score, significance=score).save() - photo.classifier_style_completed_at = timezone.now() - photo.classifier_style_version = getattr(model, 'version', 0) - photo.save() return photo, results diff --git a/photonix/photos/management/commands/classification_event_processor.py b/photonix/photos/management/commands/classification_event_processor.py new file mode 100644 index 00000000..1174369c --- /dev/null +++ b/photonix/photos/management/commands/classification_event_processor.py @@ -0,0 +1,21 @@ +from django.core.management.base import BaseCommand +# Pre-load the model graphs so it doesn't have to be done for each job +from photonix.classifiers.event import EventModel, run_on_photo +from photonix.photos.utils.classification import ThreadedQueueProcessor + + +print('Loading event model') +model = EventModel() + + +class Command(BaseCommand): + help = 'Runs the workers with the event classification model.' + + def run_processors(self): + num_workers = 1 + batch_size = 64 + threaded_queue_processor = ThreadedQueueProcessor(model, 'classify.event', run_on_photo, num_workers, batch_size) + threaded_queue_processor.run() + + def handle(self, *args, **options): + self.run_processors() diff --git a/photonix/photos/management/commands/import_demo_photos.py b/photonix/photos/management/commands/import_demo_photos.py index 9cdf1a33..72925081 100644 --- a/photonix/photos/management/commands/import_demo_photos.py +++ b/photonix/photos/management/commands/import_demo_photos.py @@ -42,15 +42,23 @@ def import_photos(self): user.save() except IntegrityError: user = User.objects.get(username='demo') + # Create Library - library, _ = Library.objects.get_or_create( - name='Demo Library', - classification_color_enabled=True, - classification_location_enabled=True, - classification_style_enabled=True, - classification_object_enabled=True, - setup_stage_completed='Th' - ) + try: + library = Library.objects.get( + name='Demo Library', + ) + except Library.DoesNotExist: + library = Library( + name='Demo Library', + classification_color_enabled=True, + classification_location_enabled=True, + classification_style_enabled=True, + classification_object_enabled=True, + setup_stage_completed='Th' + ) + library.save() + # LibraryPath as locally mounted volume LibraryPath.objects.get_or_create( library=library, diff --git a/photonix/photos/migrations/0008_auto_20210604_1842.py b/photonix/photos/migrations/0008_auto_20210604_1842.py new file mode 100644 index 00000000..a93170cc --- /dev/null +++ b/photonix/photos/migrations/0008_auto_20210604_1842.py @@ -0,0 +1,48 @@ +# Generated by Django 3.0.14 on 2021-06-04 18:42 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('photos', '0007_add_library_ForeignKey'), + ] + + operations = [ + migrations.AlterField( + model_name='photo', + name='drive_mode', + field=models.CharField(max_length=64, null=True), + ), + migrations.AlterField( + model_name='photo', + name='metering_mode', + field=models.CharField(max_length=64, null=True), + ), + 
migrations.AlterField( + model_name='photo', + name='shooting_mode', + field=models.CharField(max_length=64, null=True), + ), + migrations.AlterField( + model_name='phototag', + name='source', + field=models.CharField(choices=[('H', 'Human'), ('C', 'Computer')], db_index=True, max_length=1), + ), + migrations.AlterField( + model_name='tag', + name='source', + field=models.CharField(choices=[('H', 'Human'), ('C', 'Computer')], db_index=True, max_length=1), + ), + migrations.AlterField( + model_name='tag', + name='type', + field=models.CharField(choices=[('L', 'Location'), ('O', 'Object'), ('F', 'Face'), ('C', 'Color'), ('S', 'Style'), ('G', 'Generic'), ('E', 'Event')], db_index=True, max_length=1, null=True), + ), + migrations.AlterField( + model_name='task', + name='status', + field=models.CharField(choices=[('P', 'Pending'), ('S', 'Started'), ('C', 'Completed'), ('F', 'Failed')], db_index=True, default='P', max_length=1), + ), + ] diff --git a/photonix/photos/models.py b/photonix/photos/models.py index fafc542e..8a360293 100644 --- a/photonix/photos/models.py +++ b/photonix/photos/models.py @@ -122,9 +122,9 @@ class Photo(UUIDModel, VersionedModel): iso_speed = models.PositiveIntegerField(null=True) focal_length = models.DecimalField(max_digits=4, decimal_places=1, null=True) flash = models.NullBooleanField() - metering_mode = models.CharField(max_length=32, null=True) - drive_mode = models.CharField(max_length=32, null=True) - shooting_mode = models.CharField(max_length=32, null=True) + metering_mode = models.CharField(max_length=64, null=True) + drive_mode = models.CharField(max_length=64, null=True) + shooting_mode = models.CharField(max_length=64, null=True) camera = models.ForeignKey(Camera, related_name='photos', null=True, on_delete=models.CASCADE) lens = models.ForeignKey(Lens, related_name='photos', null=True, on_delete=models.CASCADE) latitude = models.DecimalField(max_digits=9, decimal_places=6, null=True) @@ -240,8 +240,8 @@ class Tag(UUIDModel, VersionedModel): library = models.ForeignKey(Library, related_name='tags', on_delete=models.CASCADE) name = models.CharField(max_length=128) parent = models.ForeignKey('Tag', related_name='+', null=True, on_delete=models.CASCADE) - type = models.CharField(max_length=1, choices=TAG_TYPE_CHOICES, null=True) - source = models.CharField(max_length=1, choices=SOURCE_CHOICES) + type = models.CharField(max_length=1, choices=TAG_TYPE_CHOICES, null=True, db_index=True) + source = models.CharField(max_length=1, choices=SOURCE_CHOICES, db_index=True) ordering = models.FloatField(null=True) class Meta: @@ -255,7 +255,7 @@ def __str__(self): class PhotoTag(UUIDModel, VersionedModel): photo = models.ForeignKey(Photo, related_name='photo_tags', on_delete=models.CASCADE, null=True) tag = models.ForeignKey(Tag, related_name='photo_tags', on_delete=models.CASCADE) - source = models.CharField(max_length=1, choices=SOURCE_CHOICES) + source = models.CharField(max_length=1, choices=SOURCE_CHOICES, db_index=True) model_version = models.PositiveIntegerField(default=0) confidence = models.FloatField() significance = models.FloatField(null=True) @@ -285,7 +285,7 @@ def __str__(self): class Task(UUIDModel, VersionedModel): type = models.CharField(max_length=128, db_index=True) subject_id = models.UUIDField(db_index=True) - status = models.CharField(max_length=1, choices=TAG_TYPE_CHOICES, default='P', db_index=True) + status = models.CharField(max_length=1, choices=TASK_STATUS_CHOICES, default='P', db_index=True) started_at = models.DateTimeField(null=True) 
finished_at = models.DateTimeField(null=True) parent = models.ForeignKey('self', related_name='children', null=True, on_delete=models.CASCADE) diff --git a/photonix/photos/schema.py b/photonix/photos/schema.py index a8e6b372..9e3f0a31 100644 --- a/photonix/photos/schema.py +++ b/photonix/photos/schema.py @@ -60,10 +60,6 @@ class PhotoInterface(graphene.Interface): class PhotoNode(DjangoObjectType): url = graphene.String() location = graphene.String() - location_tags = graphene.List(PhotoTagType) - object_tags = graphene.List(PhotoTagType) - color_tags = graphene.List(PhotoTagType) - style_tags = graphene.List(PhotoTagType) width = graphene.Int() height = graphene.Int() generic_tags = graphene.List(PhotoTagType) @@ -72,6 +68,12 @@ class PhotoNode(DjangoObjectType): base_file_id = graphene.UUID() download_url = graphene.String() + color_tags = graphene.List(PhotoTagType) + event_tags = graphene.List(PhotoTagType) + location_tags = graphene.List(PhotoTagType) + object_tags = graphene.List(PhotoTagType) + style_tags = graphene.List(PhotoTagType) + class Meta: model = Photo interfaces = (CustomNode, PhotoInterface) @@ -85,18 +87,6 @@ def resolve_url(self, info): size = settings.THUMBNAIL_SIZES[-1] return self.thumbnail_url(size) - def resolve_location_tags(self, info): - return self.photo_tags.filter(tag__type='L') - - def resolve_object_tags(self, info): - return self.photo_tags.filter(tag__type='O') - - def resolve_color_tags(self, info): - return self.photo_tags.filter(tag__type='C') - - def resolve_style_tags(self, info): - return self.photo_tags.filter(tag__type='S') - def resolve_width(self, info): return self.dimensions[0] @@ -118,6 +108,21 @@ def resolve_base_file_id(self, info): def resolve_download_url(self, info): return self.download_url + def resolve_location_tags(self, info): + return self.photo_tags.filter(tag__type='L') + + def resolve_object_tags(self, info): + return self.photo_tags.filter(tag__type='O') + + def resolve_color_tags(self, info): + return self.photo_tags.filter(tag__type='C') + + def resolve_event_tags(self, info): + return self.photo_tags.filter(tag__type='E') + + def resolve_style_tags(self, info): + return self.photo_tags.filter(tag__type='S') + class PhotoFilter(django_filters.FilterSet): multi_filter = CharFilter(method='multi_filter_filter') diff --git a/photonix/photos/utils/classification.py b/photonix/photos/utils/classification.py index 663d5775..e3734431 100644 --- a/photonix/photos/utils/classification.py +++ b/photonix/photos/utils/classification.py @@ -9,6 +9,7 @@ CLASSIFIERS = [ 'color', + 'event', 'location', 'object', 'style', diff --git a/photonix/photos/utils/db.py b/photonix/photos/utils/db.py index 93c95193..9c9182e0 100644 --- a/photonix/photos/utils/db.py +++ b/photonix/photos/utils/db.py @@ -40,10 +40,11 @@ def record_photo(path, library, inotify_event_type=None): break camera = None - camera_make = metadata.get('Make', '') + camera_make = metadata.get('Make', '')[:Camera.make.field.max_length] camera_model = metadata.get('Camera Model Name', '') if camera_model: camera_model = camera_model.replace(camera_make, '').strip() + camera_model = camera_model[:Camera.model.field.max_length] if camera_make and camera_model: try: camera = Camera.objects.get(library_id=library_id, make=camera_make, model=camera_model) @@ -108,15 +109,15 @@ def record_photo(path, library, inotify_event_type=None): photo = Photo( library_id=library_id, taken_at=date_taken, - taken_by=metadata.get('Artist') or None, + taken_by=metadata.get('Artist', 
'')[:Photo.taken_by.field.max_length] or None, aperture=aperture, - exposure=metadata.get('Exposure Time') or None, + exposure=metadata.get('Exposure Time', '')[:Photo.exposure.field.max_length] or None, iso_speed=iso_speed, focal_length=metadata.get('Focal Length') and metadata.get('Focal Length').split(' ', 1)[0] or None, flash=metadata.get('Flash') and 'on' in metadata.get('Flash').lower() or False, - metering_mode=metadata.get('Metering Mode') or None, - drive_mode=metadata.get('Drive Mode') or None, - shooting_mode=metadata.get('Shooting Mode') or None, + metering_mode=metadata.get('Metering Mode', '')[:Photo.metering_mode.field.max_length] or None, + drive_mode=metadata.get('Drive Mode', '')[:Photo.drive_mode.field.max_length] or None, + shooting_mode=metadata.get('Shooting Mode', '')[:Photo.shooting_mode.field.max_length] or None, camera=camera, lens=lens, latitude=latitude, diff --git a/system/supervisord.conf b/system/supervisord.conf index de87ddc0..436419dc 100644 --- a/system/supervisord.conf +++ b/system/supervisord.conf @@ -58,7 +58,7 @@ stdout_logfile_maxbytes=0 [program:raw_scheduler] command=bash -c "sleep 5 && nice -n 17 python /srv/photonix/manage.py raw_scheduler" -startsecs=14 +startsecs=15 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -67,7 +67,7 @@ stdout_logfile_maxbytes=0 [program:raw_processor] command=bash -c "sleep 6 && nice -n 17 python /srv/photonix/manage.py raw_processor" -startsecs=15 +startsecs=16 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -76,7 +76,7 @@ stdout_logfile_maxbytes=0 [program:thumbnail_scheduler] command=bash -c "sleep 7 && nice -n 17 python /srv/photonix/manage.py thumbnail_processor" -startsecs=16 +startsecs=17 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -85,7 +85,7 @@ stdout_logfile_maxbytes=0 [program:classification_scheduler] command=bash -c "sleep 8 && nice -n 18 python /srv/photonix/manage.py classification_scheduler" -startsecs=17 +startsecs=18 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -94,7 +94,7 @@ stdout_logfile_maxbytes=0 [program:classification_color_processor] command=bash -c "sleep 9 && nice -n 19 python /srv/photonix/manage.py classification_color_processor" -startsecs=18 +startsecs=19 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -103,7 +103,16 @@ stdout_logfile_maxbytes=0 [program:classification_location_processor] command=bash -c "sleep 10 && nice -n 19 python /srv/photonix/manage.py classification_location_processor" -startsecs=19 +startsecs=20 +environment=PYTHONPATH=/srv +stderr_logfile=/dev/stderr +stdout_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_logfile_maxbytes=0 + +[program:classification_event_processor] +command=bash -c "sleep 11 && nice -n 19 python /srv/photonix/manage.py classification_event_processor" +startsecs=21 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -111,8 +120,8 @@ stderr_logfile_maxbytes=0 stdout_logfile_maxbytes=0 [program:classification_style_processor] -command=bash -c "sleep 11 && nice -n 19 python /srv/photonix/manage.py classification_style_processor" -startsecs=20 +command=bash -c "sleep 12 && nice -n 19 python /srv/photonix/manage.py classification_style_processor" +startsecs=22 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -120,8 +129,8 @@ stderr_logfile_maxbytes=0 stdout_logfile_maxbytes=0 
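; A note on the stagger visible across these program blocks: each classifier
; sleeps one second longer than the previous (presumably so the heavyweight
; model graphs are not all loaded at the same moment) and sets startsecs ten
; seconds past its own sleep, so supervisord only marks a worker RUNNING once
; the Python process has survived model loading. A hypothetical further
; classifier would continue the sequence:
;
; [program:classification_example_processor]
; command=bash -c "sleep 14 && nice -n 19 python /srv/photonix/manage.py classification_example_processor"
; startsecs=24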
[program:classification_object_processor] -command=bash -c "sleep 12 && nice -n 19 python /srv/photonix/manage.py classification_object_processor" -startsecs=21 +command=bash -c "sleep 13 && nice -n 19 python /srv/photonix/manage.py classification_object_processor" +startsecs=23 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout diff --git a/ui/src/components/PhotoMetadata.js b/ui/src/components/PhotoMetadata.js index c549011f..3deb85b3 100644 --- a/ui/src/components/PhotoMetadata.js +++ b/ui/src/components/PhotoMetadata.js @@ -343,6 +343,18 @@ const PhotoMetadata = ({ ) : ( '' )} + {photo.eventTags.length ? ( +
<div>
+            <h2>Events</h2>
+            <div>
+              {photo.eventTags.map((photoTag, index) => (
+                <span key={index}>• {photoTag.tag.name}</span>
+              ))}
+            </div>
+          </div>
+        ) : (
+          ''
+        )}

      Tags diff --git a/ui/src/containers/PhotoDetailContainer.js b/ui/src/containers/PhotoDetailContainer.js index 350b7d38..ce9beb2d 100644 --- a/ui/src/containers/PhotoDetailContainer.js +++ b/ui/src/containers/PhotoDetailContainer.js @@ -61,6 +61,13 @@ const GET_PHOTO = gql` } significance } + eventTags { + id + tag { + name + } + significance + } styleTags { id tag { From 590cd4b9eea08cb43bfe4cf5b27f7d14d2d02ae6 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Fri, 4 Jun 2021 23:57:14 +0100 Subject: [PATCH 043/110] Filtering by event --- photonix/photos/schema.py | 18 ++++++++++++++++++ tests/test_graphql.py | 4 ++++ ui/src/components/SearchInput.js | 2 ++ ui/src/containers/FiltersContainer.js | 8 ++++++++ ui/src/static/images/event.svg | 1 + 5 files changed, 33 insertions(+) create mode 100644 ui/src/static/images/event.svg diff --git a/photonix/photos/schema.py b/photonix/photos/schema.py index 9e3f0a31..66148f7a 100644 --- a/photonix/photos/schema.py +++ b/photonix/photos/schema.py @@ -180,6 +180,11 @@ class Meta: model = Tag +class EventTagType(DjangoObjectType): + class Meta: + model = Tag + + class LibrarySetting(graphene.ObjectType): """To pass fields for library settingg query api.""" @@ -218,6 +223,7 @@ class Query(graphene.ObjectType): all_person_tags = graphene.List(PersonTagType, library_id=graphene.UUID(), multi_filter=graphene.String()) all_color_tags = graphene.List(ColorTagType, library_id=graphene.UUID(), multi_filter=graphene.String()) all_style_tags = graphene.List(StyleTagType, library_id=graphene.UUID(), multi_filter=graphene.String()) + all_event_tags = graphene.List(EventTagType, library_id=graphene.UUID(), multi_filter=graphene.String()) all_generic_tags = graphene.List(LocationTagType, library_id=graphene.UUID(), multi_filter=graphene.String()) library_setting = graphene.Field(LibrarySetting, library_id=graphene.UUID()) photo_file_metadata = graphene.Field(PhotoMetadataFields, photo_file_id=graphene.UUID()) @@ -366,6 +372,18 @@ def resolve_all_style_tags(self, info, **kwargs): return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='S', photo_tags__photo__in=photos_list).distinct() return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='S') + def resolve_all_event_tags(self, info, **kwargs): + user = info.context.user + if kwargs.get('multi_filter'): + if not kwargs.get('library_id'): + raise GraphQLError('library_id not supplied!') + filters = kwargs.get('multi_filter').split(' ') + photos_list = filter_photos_queryset( + filters, Photo.objects.filter(library__users__user=user), + kwargs.get('library_id')) + return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='E', photo_tags__photo__in=photos_list).distinct() + return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='E') + def resolve_all_generic_tags(self, info, **kwargs): user = info.context.user if kwargs.get('multi_filter'): diff --git a/tests/test_graphql.py b/tests/test_graphql.py index f2539dcd..5055426f 100644 --- a/tests/test_graphql.py +++ b/tests/test_graphql.py @@ -688,6 +688,10 @@ def test_response_of_get_filters_api(self): id name } + allEventTags(libraryId: $libraryId, multiFilter: $multiFilter) { + id + name + } allCameras(libraryId: $libraryId) { id make diff --git a/ui/src/components/SearchInput.js b/ui/src/components/SearchInput.js index 656a7b83..12ba3780 100644 --- a/ui/src/components/SearchInput.js +++ 
b/ui/src/components/SearchInput.js @@ -7,6 +7,7 @@ import { ReactComponent as ObjectsIcon } from '../static/images/label.svg' import { ReactComponent as LocationsIcon } from '../static/images/location_on.svg' import { ReactComponent as ColorsIcon } from '../static/images/color_lens.svg' import { ReactComponent as StylesIcon } from '../static/images/style.svg' +import { ReactComponent as EventsIcon } from '../static/images/event.svg' import { ReactComponent as CamerasIcon } from '../static/images/photo_camera.svg' import { ReactComponent as StarIcon } from '../static/images/star_outline.svg' @@ -16,6 +17,7 @@ const GROUP_ICONS = { Locations: LocationsIcon, Colors: ColorsIcon, Styles: StylesIcon, + Events: EventsIcon, Cameras: CamerasIcon, Lenses: CamerasIcon, Aperture: CamerasIcon,
diff --git a/ui/src/containers/FiltersContainer.js b/ui/src/containers/FiltersContainer.js index 7fbf3f2d..a6c5aab6 100644 --- a/ui/src/containers/FiltersContainer.js +++ b/ui/src/containers/FiltersContainer.js @@ -31,6 +31,10 @@ const GET_FILTERS = gql` id name } + allEventTags(libraryId: $libraryId, multiFilter: $multiFilter) { + id + name + } allCameras(libraryId: $libraryId) { id make @@ -136,6 +140,10 @@ const FiltersContainer = ({ selectedFilters, onFilterToggle, searchAreaExpand }) const stylesTags = getFilterdData('Styles', data.allStyleTags) filterData.push(createFilterSelection('Styles', stylesTags)) } + if (data.allEventTags.length) { + const eventsTags = getFilterdData('Events', data.allEventTags) + filterData.push(createFilterSelection('Events', eventsTags)) + } if (data.allCameras.length) { filterData.push({ name: 'Cameras',
diff --git a/ui/src/static/images/event.svg b/ui/src/static/images/event.svg new file mode 100644 index 00000000..c4d40720 --- /dev/null +++ b/ui/src/static/images/event.svg @@ -0,0 +1 @@ + \ No newline at end of file
From 045c8068933d10b71bb24665009570e1e3b11f37 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Tue, 8 Jun 2021 22:03:37 +0100 Subject: [PATCH 045/110] Error handling if ANN has been built but is empty --- photonix/classifiers/face/model.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/photonix/classifiers/face/model.py b/photonix/classifiers/face/model.py index e8027f0a..ad811d54 100644 --- a/photonix/classifiers/face/model.py +++ b/photonix/classifiers/face/model.py @@ -105,7 +105,8 @@ def find_closest_face_tag_by_ann(self, source_embedding): with open(tag_ids_path) as f: tag_ids = json.loads(f.read()) nearest = t.get_nns_by_vector(source_embedding, 1, include_distances=True) - return tag_ids[nearest[0][0]], nearest[1][0] + if nearest[0]: + return tag_ids[nearest[0][0]], nearest[1][0] return (None, 999) From
a068776fc19906427e14426b0ade1a9a2243872e Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Wed, 9 Jun 2021 20:16:49 +0100 Subject: [PATCH 046/110] Don't create ANN until at least one Tag of type face has been created --- .../commands/retrain_face_similarity_index.py | 8 ++++++-- ui/src/components/onboarding/Step3CreateLibrary.js | 10 ++++++---- 2 files changed, 12 insertions(+), 6 deletions(-) diff --git a/photonix/photos/management/commands/retrain_face_similarity_index.py b/photonix/photos/management/commands/retrain_face_similarity_index.py index b3fecf26..e7e1227b 100644 --- a/photonix/photos/management/commands/retrain_face_similarity_index.py +++ b/photonix/photos/management/commands/retrain_face_similarity_index.py @@ -27,8 +27,12 @@ def retrain_face_similarity_index(self): for library in Library.objects.all(): start = time() print(f'Updating ANN index for Library {library.id}') - if version_date and PhotoTag.objects.filter(updated_at__gt=version_date).count() == 0: - print(' No new PhotoTags in Library so no point in updating face ANN index') + + if PhotoTag.objects.filter(tag__type='F').count() == 0: + print(' No Face PhotoTags in Library so no point in creating face ANN index yet') + return + if version_date and PhotoTag.objects.filter(updated_at__gt=version_date, tag__type='F').count() == 0: + print(' No new Face PhotoTags in Library so no point in updating face ANN index') return FaceModel(library_id=library.id).retrain_face_similarity_index() diff --git a/ui/src/components/onboarding/Step3CreateLibrary.js b/ui/src/components/onboarding/Step3CreateLibrary.js index 58e398ef..ab445ced 100644 --- a/ui/src/components/onboarding/Step3CreateLibrary.js +++ b/ui/src/components/onboarding/Step3CreateLibrary.js @@ -87,10 +87,12 @@ const Step3CreateLibrary = ({ history }) => { {state.data.storageBackend === 'Lo' && (

      - The base path will need to be writeable so that we can put new - files here, and also needs to be large enough to store your whole - collection. If you’re running in a container, feel free to restart - it with new mounted volumes if you need to. + Leave base path as the default unless you have configured multiple + volumes for multiple libraries. The base path will need to be + writeable so that we can put new files here, and also needs to be + large enough to store your whole collection. If you’re running in + a container, feel free to restart it with new mounted volumes if + you need to.
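The retraining commits above all rest on one serialization convention: the retrained-model version is a UTC timestamp rendered as YYYYMMDDHHMMSS, written to a per-library text file after each retrain and parsed back to decide whether the ANN index is stale. A minimal round-trip of that format, standard library only:

    from datetime import datetime, timezone

    version = datetime.utcnow().strftime('%Y%m%d%H%M%S')   # e.g. '20210609201649'
    restored = datetime.strptime(version, '%Y%m%d%H%M%S').replace(tzinfo=timezone.utc)
    assert restored.strftime('%Y%m%d%H%M%S') == version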

Date: Wed, 9 Jun 2021 21:49:06 +0100 Subject: [PATCH 047/110] Minor tweaks to user creation script --- .../photos/management/commands/create_user.py | 45 ++++++++++--------- 1 file changed, 24 insertions(+), 21 deletions(-)
diff --git a/photonix/photos/management/commands/create_user.py b/photonix/photos/management/commands/create_user.py index a023ec8c..a6d068fa 100644 --- a/photonix/photos/management/commands/create_user.py +++ b/photonix/photos/management/commands/create_user.py @@ -1,81 +1,84 @@ - +from getpass import getpass import sys + from django.contrib.auth import get_user_model from django.core.management.base import BaseCommand + from photonix.photos.models import Library, LibraryUser + User = get_user_model() class Command(BaseCommand): - """Management command to create user and assign him to libararies.""" + '''Management command to create a user and assign them to libraries.''' help = 'Assign library to user' def create_user(self, username, password): - """To create user and assign him to libraries.""" + '''To create a user and assign them to libraries.''' if not username: - username = input(" Please enter username : ") + username = input('\nPlease enter username: ') if User.objects.filter(username=username).exists(): - print("\n User already exists with username ", username, ". ") + print(f'User "{username}" already exists') self.show_libraries_list(User.objects.get(username=username)) else: self.validate_password(username, password) def show_libraries_list(self, user): - """Method to show library list.""" - print("\n Here is the list of libraries.\n ") + '''Method to show library list.''' + print('\nCurrent libraries:\n ') lib_num_obj_pair_list = [] lib_sequence_list = [] for count, lib_obj in enumerate(Library.objects.all(), start=1): - print(" ", count, " ", lib_obj.name) + print(f' {count}) {lib_obj.name}') lib_num_obj_pair_list.append((count, lib_obj)) lib_sequence_list.append(count) self.assign_user_to_library(lib_num_obj_pair_list, user, lib_sequence_list) def validate_password(self, username, password=None): - """Method to validate the password.""" + '''Method to validate the password.''' if not password: - password = input("\n Please enter password : ") + password = getpass('Please enter password (hidden): ') if len(password) >= 8: user = User.objects.create(username=username) user.set_password(password) user.save() - print(" User created with name ", username) + print(f'\nUser created with name "{username}"') self.show_libraries_list(user) else: - print(" Password must be at least 8 characters long!") + print('Password must be at least 8 characters long!') self.validate_password(username) def assign_user_to_library(self, lib_num_obj_pair_list, user, lib_sequence_list): - """Method to assign user to selected libarary.""" + '''Method to assign user to the selected library.''' - entered_lib_num = input("\n Please enter a library number. ") + entered_lib_num = input('\nPlease enter the number of a library you want the user to be able to access: 
") + '''Method to assign user to selected libarary.''' + entered_lib_num = input('\nPlease enter the number of a library you want the user to be able to access: ') if not (entered_lib_num.isdigit() and int(entered_lib_num) in lib_sequence_list): - print(" You have entered invalid library number.") + print('You have entered invalid library number.') self.assign_user_to_library(lib_num_obj_pair_list, user, lib_sequence_list) for sequence_number, obj in lib_num_obj_pair_list: if int(entered_lib_num) == sequence_number: LibraryUser.objects.get_or_create(library=obj, user=user, owner=True) - print(" User ", user.username, "assigned to library ", obj.name, "\n") + print(f'\nUser "{user.username}" assigned to library "{obj.name}"\n') self.continue_the_process(lib_num_obj_pair_list, user, lib_sequence_list) def continue_the_process(self, lib_num_obj_pair_list, user, lib_sequence_list): - """Method to continue the process if user wants to allocate user object to another libraries.""" - continue_or_not = input(" Do you want to add user to another library ?. Enter Y or N: ") + '''Method to continue the process if user wants to allocate user object to another libraries.''' + continue_or_not = input('Do you want to add user to another library? Enter Y or N: ') if continue_or_not.upper() == 'Y': self.assign_user_to_library(lib_num_obj_pair_list, user, lib_sequence_list) elif continue_or_not.upper() == 'N': sys.exit() # we can also write here 'pass' but to avoid unnecessary loop running we used exit() else: - print(" Please enter only Y or N") + print('Please enter only Y or N') self.continue_the_process(lib_num_obj_pair_list, user, lib_sequence_list) def add_arguments(self, parser): - """To pass argumentes in management command.""" + '''To pass argumentes in management command.''' # Optional or named arguments parser.add_argument('--username', type=str, help='Take username') parser.add_argument('--password', type=str, help='Take password') def handle(self, *args, **options): - """Method in which we call management command with passed arguments.""" + '''Method in which we call management command with passed arguments.''' self.create_user(options.get('username'), options.get('password')) From d555f58a42367b736710f20d89e8324441463697 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Wed, 9 Jun 2021 21:53:51 +0100 Subject: [PATCH 048/110] Symlink to manage.py --- docker/Dockerfile.dev | 1 + docker/Dockerfile.prd | 1 + manage.py | 1 + 3 files changed, 3 insertions(+) create mode 120000 manage.py diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev index bce3561d..6bd240ce 100644 --- a/docker/Dockerfile.dev +++ b/docker/Dockerfile.dev @@ -66,6 +66,7 @@ RUN cd /srv/ui && yarn install # Copy over the code COPY photonix /srv/photonix COPY test.py /srv/test.py +COPY manage.py /srv/manage.py COPY tests /srv/tests COPY ui/public /srv/ui/public COPY ui/src /srv/ui/src diff --git a/docker/Dockerfile.prd b/docker/Dockerfile.prd index 80e641e3..40a7003f 100644 --- a/docker/Dockerfile.prd +++ b/docker/Dockerfile.prd @@ -120,6 +120,7 @@ WORKDIR /srv # Copy over the code COPY photonix /srv/photonix +COPY manage.py /srv/manage.py COPY test.py /srv/test.py COPY tests /srv/tests COPY ui/public /srv/ui/public diff --git a/manage.py b/manage.py new file mode 120000 index 00000000..ff066519 --- /dev/null +++ b/manage.py @@ -0,0 +1 @@ +photonix/manage.py \ No newline at end of file From 98311b8b80d28ac6e6d92dbae3bcf987d5027e7a Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Wed, 9 Jun 2021 22:30:25 +0100 Subject: [PATCH 
049/110] Fix for housekeeping script error --- photonix/photos/management/commands/housekeeping.py | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/photonix/photos/management/commands/housekeeping.py b/photonix/photos/management/commands/housekeeping.py index 3db8c916..0a3be620 100644 --- a/photonix/photos/management/commands/housekeeping.py +++ b/photonix/photos/management/commands/housekeeping.py @@ -15,11 +15,14 @@ class Command(BaseCommand): def housekeeping(self): # Remove old cache directories - for directory in os.listdir(settings.THUMBNAIL_ROOT): - if directory not in ['photofile']: - path = Path(settings.THUMBNAIL_ROOT) / directory - print(f'Removing old cache directory {path}') - rmtree(path) + try: + for directory in os.listdir(settings.THUMBNAIL_ROOT): + if directory not in ['photofile']: + path = Path(settings.THUMBNAIL_ROOT) / directory + print(f'Removing old cache directory {path}') + rmtree(path) + except FileNotFoundError: # In case thumbnail dir hasn't been created yet + pass # Regenerate any outdated thumbnails photos = Photo.objects.filter(thumbnailed_version__lt=THUMBNAILER_VERSION) From 0152ecc183e77c5eccff714e7f433b037046d8e7 Mon Sep 17 00:00:00 2001 From: GyanP Date: Thu, 10 Jun 2021 23:25:53 +0530 Subject: [PATCH 050/110] partially done the task #124 --- photonix/photos/admin.py | 6 ++-- .../management/commands/import_demo_photos.py | 1 + ...009_library_classification_face_enabled.py | 18 ++++++++++ photonix/photos/models.py | 1 + photonix/photos/schema.py | 34 +++++++++++++++++++ photonix/photos/utils/metadata.py | 1 - tests/factories.py | 1 + ui/src/components/BoundingBoxes.js | 20 ++++++++++- ui/src/components/ModalForm.js | 1 + ui/src/components/Settings.js | 26 +++++++++++--- ui/src/components/onboarding/Step5Search.js | 12 +++++++ ui/src/graphql/onboarding.js | 2 ++ ui/src/graphql/settings.js | 16 +++++++++ ui/src/static/images/block_white.svg | 1 + ui/src/static/images/edit_white.svg | 1 + 15 files changed, 131 insertions(+), 10 deletions(-) create mode 100644 photonix/photos/migrations/0009_library_classification_face_enabled.py create mode 100644 ui/src/static/images/block_white.svg create mode 100644 ui/src/static/images/edit_white.svg diff --git a/photonix/photos/admin.py b/photonix/photos/admin.py index 66c6f016..00a81b79 100644 --- a/photonix/photos/admin.py +++ b/photonix/photos/admin.py @@ -24,14 +24,14 @@ class LibraryPathInline(admin.TabularInline): class LibraryAdmin(VersionedAdmin): - list_display = ('name', 'classification_color_enabled', 'classification_location_enabled', 'classification_style_enabled', 'classification_object_enabled', 'setup_stage_completed', 'created_at', 'updated_at') + list_display = ('name', 'classification_color_enabled', 'classification_location_enabled', 'classification_style_enabled', 'classification_object_enabled', 'classification_face_enabled', 'setup_stage_completed', 'created_at', 'updated_at') list_ordering = ('name',) - list_filter = ('classification_color_enabled', 'classification_location_enabled', 'classification_style_enabled', 'classification_object_enabled', 'setup_stage_completed',) + list_filter = ('classification_color_enabled', 'classification_location_enabled', 'classification_style_enabled', 'classification_object_enabled', 'classification_face_enabled' ,'setup_stage_completed',) inlines = [LibraryUserInline, LibraryPathInline] fieldsets = ( (None, { - 'fields': ('name', 'classification_color_enabled', 'classification_location_enabled', 'classification_style_enabled', 
'classification_object_enabled', 'setup_stage_completed'), + 'fields': ('name', 'classification_color_enabled', 'classification_location_enabled', 'classification_style_enabled', 'classification_object_enabled', 'classification_face_enabled', 'setup_stage_completed'), }), ) + VersionedAdmin.fieldsets diff --git a/photonix/photos/management/commands/import_demo_photos.py b/photonix/photos/management/commands/import_demo_photos.py index 9cdf1a33..8b6b2230 100644 --- a/photonix/photos/management/commands/import_demo_photos.py +++ b/photonix/photos/management/commands/import_demo_photos.py @@ -49,6 +49,7 @@ def import_photos(self): classification_location_enabled=True, classification_style_enabled=True, classification_object_enabled=True, + classification_face_enabled=True, setup_stage_completed='Th' ) # LibraryPath as locally mounted volume diff --git a/photonix/photos/migrations/0009_library_classification_face_enabled.py b/photonix/photos/migrations/0009_library_classification_face_enabled.py new file mode 100644 index 00000000..4fc11522 --- /dev/null +++ b/photonix/photos/migrations/0009_library_classification_face_enabled.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.3 on 2021-06-10 12:52 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('photos', '0008_auto_20210603_1442'), + ] + + operations = [ + migrations.AddField( + model_name='library', + name='classification_face_enabled', + field=models.BooleanField(default=False, help_text='Run face detection on photos?'), + ), + ] diff --git a/photonix/photos/models.py b/photonix/photos/models.py index 1b7e602b..f368ce22 100644 --- a/photonix/photos/models.py +++ b/photonix/photos/models.py @@ -25,6 +25,7 @@ class Library(UUIDModel, VersionedModel): classification_location_enabled = models.BooleanField(default=False, help_text='Run location detection on photos?') classification_style_enabled = models.BooleanField(default=False, help_text='Run style classification on photos?') classification_object_enabled = models.BooleanField(default=False, help_text='Run object detection on photos?') + classification_face_enabled = models.BooleanField(default=False, help_text='Run face detection on photos?') setup_stage_completed = models.CharField(max_length=2, choices=LIBRARY_SETUP_STAGE_COMPLETED_CHOICES, blank=True, null=True, help_text='Where the user got to during onboarding setup') class Meta: diff --git a/photonix/photos/schema.py b/photonix/photos/schema.py index 84c86529..0e2c93ad 100644 --- a/photonix/photos/schema.py +++ b/photonix/photos/schema.py @@ -407,6 +407,7 @@ class LibraryInput(graphene.InputObjectType): classification_location_enabled = graphene.Boolean() classification_style_enabled = graphene.Boolean() classification_object_enabled = graphene.Boolean() + classification_face_enabled = graphene.Boolean() source_folder = graphene.String(required=False) user_id = graphene.ID() library_id = graphene.ID() @@ -536,6 +537,37 @@ def mutate(root, info, input=None): return UpdateLibraryObjectEnabled(ok=ok, classification_object_enabled=None) +class UpdateLibraryFaceEnabled(graphene.Mutation): + """To update data in the database that will be passed from the frontend FaceEnabled API.""" + + class Arguments: + """To set arguments for the mutate method.""" + + input = LibraryInput(required=False) + + ok = graphene.Boolean() + classification_face_enabled = graphene.Boolean() + + @staticmethod + def mutate(root, info, input=None): + """Method to save the updated data for the FaceEnabled API.""" + ok = 
False + user = info.context.user + libraries = Library.objects.filter(users__user=user, users__owner=True, id=input.library_id) + if libraries and str(input.get('classification_face_enabled')) != 'None':  # distinguishes an explicit False from a missing value + library_obj = libraries[0] + library_obj.classification_face_enabled = input.classification_face_enabled + library_obj.save() + ok = True + return UpdateLibraryFaceEnabled( + ok=ok, + classification_face_enabled=library_obj.classification_face_enabled) + if not libraries: + raise Exception('User is not the owner of the library!') + else: + return UpdateLibraryFaceEnabled(ok=ok, classification_face_enabled=None) + + class UpdateLibrarySourceFolder(graphene.Mutation): """To update data in database that will be passed from frontend SourceFolder api.""" @@ -681,6 +713,7 @@ def mutate(self, info, input=None): library_obj.classification_location_enabled = input.classification_location_enabled library_obj.classification_style_enabled = input.classification_style_enabled library_obj.classification_object_enabled = input.classification_object_enabled + library_obj.classification_face_enabled = input.classification_face_enabled library_obj.save() user = User.objects.get(pk=input.user_id) user.has_configured_image_analysis = True @@ -798,6 +831,7 @@ class Mutation(graphene.ObjectType): update_location_enabled = UpdateLibraryLocationEnabled.Field() update_style_enabled = UpdateLibraryStyleEnabled.Field() update_object_enabled = UpdateLibraryObjectEnabled.Field() + update_face_enabled = UpdateLibraryFaceEnabled.Field() update_source_folder = UpdateLibrarySourceFolder.Field() create_library = CreateLibrary.Field() Photo_importing = PhotoImporting.Field() diff --git a/photonix/photos/utils/metadata.py b/photonix/photos/utils/metadata.py index 0c45b79a..af3184c2 100644 --- a/photonix/photos/utils/metadata.py +++ b/photonix/photos/utils/metadata.py @@ -78,7 +78,6 @@ def get_datetime(path): if not matched: matched = re.search(r'\D((19|20)[0-9]{2})([0-9]{2})([0-9]{2})\D', fn) if matched: - # import pdb; pdb.set_trace() date_str = '{}-{}-{}'.format(matched.group(1), matched.group(3), matched.group(4)) return datetime.strptime(date_str, '%Y-%m-%d') return None diff --git a/tests/factories.py b/tests/factories.py index f6e71d29..0cab88d7 100644 --- a/tests/factories.py +++ b/tests/factories.py @@ -26,6 +26,7 @@ class Meta: name = factory.Sequence(lambda n: f'Test Library {n}') classification_location_enabled = True classification_object_enabled = True + classification_face_enabled = True class LibraryUserFactory(factory.django.DjangoModelFactory): class Meta: diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index 674bb308..d125a421 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -1,5 +1,7 @@ -import React from 'react' +import React, { useState } from 'react' import styled from '@emotion/styled' +import { ReactComponent as EditIcon } from '../static/images/edit_white.svg' +import { ReactComponent as BlockIcon } from '../static/images/block_white.svg' const Container = styled('div')` width: 100%; @@ -37,6 +39,16 @@ const Container = styled('div')` text-shadow: 0 0 2px #ff0; } } + .FeatureIconEdit{ + position: absolute; + bottom: 0px; + right: 3px; + } + .FeatureIconDelete{ + position: absolute; + bottom: 0px; + right: 30px; + } } } @@ -53,6 +65,7 @@ const Container = styled('div')` ` const BoundingBoxes = ({ boxes, className }) => { + const [editMode, setEditMode] = useState(false) return ( <Container className={className}> {boxes?.map((box, index) => { @@ -67,8 +80,13 
@@ const BoundingBoxes = ({ boxes, className }) => { style={{ left: left, top: top, width: width, height: height }} >
      + {console.log(editMode)} {box.name}
      + {className === "face" && <> + setEditMode(!editMode)} /> + setEditMode(!editMode)} /> + }

      ) })} diff --git a/ui/src/components/ModalForm.js b/ui/src/components/ModalForm.js index e6f00e30..8d3c0af3 100644 --- a/ui/src/components/ModalForm.js +++ b/ui/src/components/ModalForm.js @@ -134,6 +134,7 @@ const ModalForm = ({ classificationStyleEnabled: data.classificationStyleEnabled, classificationObjectEnabled: data.classificationObjectEnabled, classificationLocationEnabled: data.classificationLocationEnabled, + classificationFaceEnabled: data.classificationFaceEnabled, userId: envData.environment.userId, libraryId: LibraryId ? LibraryId : envData.environment.libraryId, }, diff --git a/ui/src/components/Settings.js b/ui/src/components/Settings.js index 37f63ca9..0aebb8e6 100644 --- a/ui/src/components/Settings.js +++ b/ui/src/components/Settings.js @@ -19,6 +19,7 @@ import { SETTINGS_COLOR, SETTINGS_LOCATION, SETTINGS_OBJECT, + SETTINGS_FACE, SETTINGS_SOURCE_FOLDER, GET_SETTINGS, } from '../graphql/settings' @@ -59,6 +60,11 @@ export default function Settings() { type: 'boolean', label: 'Run object detection on photos?', }, + { + key: 'classificationFaceEnabled', + type: 'boolean', + label: 'Run face detection on photos?', + }, ] function toggleBooleanSetting(key) { @@ -72,7 +78,7 @@ export default function Settings() { classificationStyleEnabled: newSettings.classificationStyleEnabled, libraryId: activeLibrary?.id, }, - }).catch((e) => {}) + }).catch((e) => { }) return key case 'classificationLocationEnabled': settingUpdateLocation({ @@ -81,7 +87,7 @@ export default function Settings() { newSettings.classificationLocationEnabled, libraryId: activeLibrary?.id, }, - }).catch((e) => {}) + }).catch((e) => { }) return key case 'classificationObjectEnabled': settingUpdateObject({ @@ -90,7 +96,7 @@ export default function Settings() { newSettings.classificationObjectEnabled, libraryId: activeLibrary?.id, }, - }).catch((e) => {}) + }).catch((e) => { }) return key case 'classificationColorEnabled': settingUpdateColor({ @@ -98,7 +104,16 @@ export default function Settings() { classificationColorEnabled: newSettings.classificationColorEnabled, libraryId: activeLibrary?.id, }, - }).catch((e) => {}) + }).catch((e) => { }) + return key + case 'classificationFaceEnabled': + settingUpdateFace({ + variables: { + classificationFaceEnabled: + newSettings.classificationFaceEnabled, + libraryId: activeLibrary?.id, + }, + }).catch((e) => { }) return key default: return null @@ -121,12 +136,13 @@ export default function Settings() { sourceFolder: newSettings.sourceDirs, libraryId: activeLibrary?.id, }, - }).catch((e) => {}) + }).catch((e) => { }) } const [settingUpdateStyle] = useMutation(SETTINGS_STYLE) const [settingUpdateColor] = useMutation(SETTINGS_COLOR) const [settingUpdateLocation] = useMutation(SETTINGS_LOCATION) const [settingUpdateObject] = useMutation(SETTINGS_OBJECT) + const [settingUpdateFace] = useMutation(SETTINGS_FACE) const [settingUpdateSourceFolder] = useMutation(SETTINGS_SOURCE_FOLDER) return ( diff --git a/ui/src/components/onboarding/Step5Search.js b/ui/src/components/onboarding/Step5Search.js index 19e26502..af03b415 100644 --- a/ui/src/components/onboarding/Step5Search.js +++ b/ui/src/components/onboarding/Step5Search.js @@ -96,6 +96,18 @@ const Step5Search = ({ history }) => { : true } /> + diff --git a/ui/src/graphql/onboarding.js b/ui/src/graphql/onboarding.js index 1e482292..8b2e1f95 100644 --- a/ui/src/graphql/onboarding.js +++ b/ui/src/graphql/onboarding.js @@ -127,6 +127,7 @@ mutation ( $classificationStyleEnabled: Boolean!, $classificationObjectEnabled: Boolean!, 
$classificationLocationEnabled: Boolean!, + $classificationFaceEnabled: Boolean!, $userId: ID!, $libraryId: ID!, ) { @@ -135,6 +136,7 @@ mutation ( classificationStyleEnabled:$classificationStyleEnabled, classificationObjectEnabled:$classificationObjectEnabled, classificationLocationEnabled:$classificationLocationEnabled, + classificationFaceEnabled:$classificationFaceEnabled, userId:$userId, libraryId:$libraryId, }) { diff --git a/ui/src/graphql/settings.js b/ui/src/graphql/settings.js index cdd13f98..49193202 100644 --- a/ui/src/graphql/settings.js +++ b/ui/src/graphql/settings.js @@ -60,6 +60,21 @@ export const SETTINGS_OBJECT = gql` } } ` +export const SETTINGS_FACE = gql` + mutation updateFaceEnabled( + $classificationFaceEnabled: Boolean! + $libraryId: ID + ) { + updateFaceEnabled( + input: { + classificationFaceEnabled: $classificationFaceEnabled + libraryId: $libraryId + } + ) { + classificationFaceEnabled + } + } +` export const SETTINGS_SOURCE_FOLDER = gql` mutation updateSourceFolder($sourceFolder: String!, $libraryId: ID) { updateSourceFolder( @@ -79,6 +94,7 @@ export const GET_SETTINGS = gql` classificationStyleEnabled classificationObjectEnabled classificationLocationEnabled + classificationFaceEnabled } sourceFolder } diff --git a/ui/src/static/images/block_white.svg b/ui/src/static/images/block_white.svg new file mode 100644 index 00000000..4a3302da --- /dev/null +++ b/ui/src/static/images/block_white.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/ui/src/static/images/edit_white.svg b/ui/src/static/images/edit_white.svg new file mode 100644 index 00000000..6c7849fa --- /dev/null +++ b/ui/src/static/images/edit_white.svg @@ -0,0 +1 @@ + \ No newline at end of file From ce1729668b33fb6f06dca4dd1afbdfe02d7ebbd4 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Thu, 10 Jun 2021 22:41:13 +0100 Subject: [PATCH 051/110] Face click event fix and bounding box styling --- ui/src/components/BoundingBoxes.js | 27 ++++++++++++++++++--------- ui/src/components/ZoomableImage.js | 1 - 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index d125a421..bcb44b4b 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -12,6 +12,7 @@ const Container = styled('div')` border: 3px solid rgba(255, 0, 0, 0.75); position: absolute; border-radius: 6px; + overflow: hidden; .FeatureLabel { color: #fff; font-size: 14px; @@ -19,12 +20,11 @@ const Container = styled('div')` display: inline-block; overflow: hidden; max-width: 100%; - padding: 0 7px 2px 4px; + padding: 1px 7px 2px 4px; float: left; text-align: left; white-space: nowrap; pointer-events: all; - border-radius: 3px 3px 0 0; &:hover { overflow: visible; text-shadow: 0 0 2px #f00; @@ -39,12 +39,12 @@ const Container = styled('div')` text-shadow: 0 0 2px #ff0; } } - .FeatureIconEdit{ + .FeatureIconEdit { position: absolute; bottom: 0px; right: 3px; } - .FeatureIconDelete{ + .FeatureIconDelete { position: absolute; bottom: 0px; right: 30px; @@ -58,7 +58,6 @@ const Container = styled('div')` .FeatureLabel { font-size: 8px; padding: 0 3px 1px 3px; - border-radius: 5px 5px 0 0; } } } @@ -83,10 +82,20 @@ const BoundingBoxes = ({ boxes, className }) => { {console.log(editMode)} {box.name}
    - {className === "face" && <> - setEditMode(!editMode)} /> - setEditMode(!editMode)} /> - } + {className === 'face' && ( + <> + setEditMode(!editMode)} + /> + setEditMode(!editMode)} + /> + + )}
) })} diff --git a/ui/src/components/ZoomableImage.js b/ui/src/components/ZoomableImage.js index 3efd8ed3..504635f9 100644 --- a/ui/src/components/ZoomableImage.js +++ b/ui/src/components/ZoomableImage.js @@ -171,7 +171,6 @@ const ZoomableImage = ({ photoId, boxes, next, prev }) => { From f1abeeab92a1cbfcd46557c882fb77a21d5d9d7d Mon Sep 17 00:00:00 2001 From: GyanP Date: Wed, 16 Jun 2021 19:21:26 +0530 Subject: [PATCH 052/110] Task Completed --- photonix/photos/schema.py | 85 ++++++- ui/src/components/BoundingBoxes.js | 213 +++++++++++++++--- ui/src/components/PhotoDetail.js | 10 + ui/src/components/PhotoMetadata.js | 7 +- ui/src/components/ZoomableImage.js | 12 +- ui/src/containers/FiltersContainer.js | 5 +- ui/src/containers/PhotoDetailContainer.js | 3 + ui/src/graphql/tag.js | 28 +++ .../{block_white.svg => block_black.svg} | 2 +- ui/src/static/images/done_black.svg | 1 + ui/src/static/images/edit_white.svg | 1 - ui/src/stores/index.js | 2 + ui/src/stores/tag/index.js | 15 ++ ui/src/stores/tag/selector.js | 3 + 14 files changed, 342 insertions(+), 45 deletions(-) rename ui/src/static/images/{block_white.svg => block_black.svg} (83%) create mode 100644 ui/src/static/images/done_black.svg delete mode 100644 ui/src/static/images/edit_white.svg create mode 100644 ui/src/stores/tag/index.js create mode 100644 ui/src/stores/tag/selector.js diff --git a/photonix/photos/schema.py b/photonix/photos/schema.py index 0e2c93ad..9d887a65 100644 --- a/photonix/photos/schema.py +++ b/photonix/photos/schema.py @@ -34,9 +34,15 @@ class Meta: class PhotoTagType(DjangoObjectType): + show_verify_icon = graphene.Boolean() + class Meta: model = PhotoTag + def resolve_show_verify_icon(self, info): + if self.tag.type == 'F' and not self.verified and self.tag.photo_tags.filter(verified=True).exists(): + return True + return False class PhotoFileType(DjangoObjectType): class Meta: @@ -180,12 +186,11 @@ class Meta: class LibrarySetting(graphene.ObjectType): - """To pass fields for library settingg query api.""" + """To pass fields for library setting query api.""" library = graphene.Field(LibraryType) source_folder = graphene.String() - class PhotoMetadataFields(graphene.ObjectType): """ Metadata about photo as extracted by exiftool """ data = graphene.types.generic.GenericScalar() @@ -339,7 +344,7 @@ def resolve_all_person_tags(self, info, **kwargs): filters, Photo.objects.filter(library__users__user=user), kwargs.get('library_id')) return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__photo__in=photos_list).distinct() - return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F') + return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__deleted=False).distinct() def resolve_all_color_tags(self, info, **kwargs): user = info.context.user @@ -826,6 +831,77 @@ def mutate(self, info, selected_photo_file_id=None): return ChangePreferredPhotoFile(ok=True) +class EditFaceTag(graphene.Mutation): + """Face tagging for face Detection.""" + + class Arguments: + """Input arguments which will pass from frontend.""" + + photo_tag_id = graphene.ID() + new_name = graphene.String() + + ok = graphene.Boolean() + + @staticmethod + def mutate(self, info, photo_tag_id=None, new_name=None): + """Mutation to create or update face tags and assign them to photoTag.""" + obj = Tag.objects.filter(name=new_name, type='F').first() + photo_tag = PhotoTag.objects.get(id=photo_tag_id) + 
already_assigned_tag = photo_tag.tag + if obj: + photo_tag.tag = obj + photo_tag.save() + already_assigned_tag.photo_tags.all().count() or already_assigned_tag.delete()  # if no photos still reference the old tag, delete it + else: + already_assigned_tag.name = new_name + already_assigned_tag.save() + photo_tag.verified = True + photo_tag.confidence = 1 + photo_tag.save() + return EditFaceTag(ok=True) + + +class BlockFaceTag(graphene.Mutation): + """Face tagging for face detection.""" + + class Arguments: + """Input arguments which will be passed from the frontend.""" + + photo_tag_id = graphene.ID() + + ok = graphene.Boolean() + + @staticmethod + def mutate(self, info, photo_tag_id=None, new_name=None): + """Mutation to block a face 'F' type photoTag.""" + photo_tag = PhotoTag.objects.get(id=photo_tag_id) + photo_tag.deleted = True + photo_tag.verified = False + photo_tag.confidence = 0 + photo_tag.save() + return BlockFaceTag(ok=True) + + +class VerifyPhoto(graphene.Mutation): + """Face tagging for face detection.""" + + class Arguments: + """Input arguments which will be passed from the frontend.""" + + photo_tag_id = graphene.ID() + + ok = graphene.Boolean() + + @staticmethod + def mutate(self, info, photo_tag_id=None, new_name=None): + """Mutation to verify a face 'F' type photoTag.""" + photo_tag = PhotoTag.objects.get(id=photo_tag_id) + photo_tag.verified = True + photo_tag.confidence = 1 + photo_tag.save() + return VerifyPhoto(ok=True) + + class Mutation(graphene.ObjectType): update_color_enabled = UpdateLibraryColorEnabled.Field() update_location_enabled = UpdateLibraryLocationEnabled.Field() @@ -840,3 +916,6 @@ class Mutation(graphene.ObjectType): create_generic_tag = CreateGenricTag.Field() remove_generic_tag = RemoveGenericTag.Field() change_preferred_photo_file = ChangePreferredPhotoFile.Field() + edit_face_tag = EditFaceTag.Field() + block_face_tag = BlockFaceTag.Field() + verify_photo = VerifyPhoto.Field() diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index bcb44b4b..42b5a832 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -1,7 +1,13 @@ -import React, { useState } from 'react' +import React, { useState, useEffect, useRef } from 'react' import styled from '@emotion/styled' -import { ReactComponent as EditIcon } from '../static/images/edit_white.svg' -import { ReactComponent as BlockIcon } from '../static/images/block_white.svg' +import { useMutation } from '@apollo/client' +import { useDispatch, useSelector } from 'react-redux' +import { ReactComponent as EditIcon } from '../static/images/edit.svg' +import { ReactComponent as BlockIcon } from '../static/images/block_black.svg' +import { ReactComponent as DoneIcon } from '../static/images/done_black.svg' +import { EDIT_FACE_TAG, BLOCK_FACE_TAG, VERIFY_FACE_TAG } from '../graphql/tag' +import { isTagUpdated } from "../stores/tag/selector"; + const Container = styled('div')` width: 100%; @@ -31,23 +37,55 @@ const Container = styled('div')` } } &.face { - border-color: rgba(255, 255, 0, 0.75); - .FeatureLabel { - color: #000; - background-color: rgba(255, 255, 0, 0.75); - &:hover { - text-shadow: 0 0 2px #ff0; + &.yellowBox{ + border-color: rgba(255, 255, 0, 0.75); + .FeatureLabel { + color: #000; + background-color: rgba(255, 255, 0, 0.75); + &:hover { + text-shadow: 0 0 2px #ff0; + } } } - .FeatureIconEdit { + &.greenBox{ + border-color: rgba(9, 119, 56, 0.9); + .FeatureLabel { + color: #000; + background-color : rgba(9, 119, 56, 0.9); + &:hover { + text-shadow: 0 0 2px #ff0; + } + } + } + &.whiteBox{ + 
border-color: rgba(202, 202, 191, 0.95); + } + .FeatureEditText{ + color:#000 !important; + } + .icons{ position: absolute; bottom: 0px; - right: 3px; + right:0; + + .FeatureIconEdit { + background: #fff; + border-radius: 50%; + padding: 3px; + margin: 0 1px; } .FeatureIconDelete { - position: absolute; - bottom: 0px; - right: 30px; + background: red; + border-radius: 50%; + padding: 3px; + margin: 0 1px; + } + .FeatureIconDone { + background: #2ff16df0; + border-radius: 50%; + padding: 3px; + margin: 0 1px; + } } } } @@ -62,9 +100,88 @@ const Container = styled('div')` } } ` +const BoundingBoxes = ({ boxes, className, refetch }) => { + const dispatch = useDispatch() + const ref = useRef(null) + const [editLableId, setEditLableId] = useState('') + const [tagName, setTagName] = useState(null) + const [editFaceTag] = useMutation(EDIT_FACE_TAG) + const [blockFaceTag] = useMutation(BLOCK_FACE_TAG) + const [verifyPhoto] = useMutation(VERIFY_FACE_TAG) + const tagUpdated = useSelector(isTagUpdated) + + const onHandleBlock = (photoTagId) => { + blockFaceTag({ + variables: { + photoTagId: photoTagId + }, + }) + .then((res) => { + if (res.data.blockFaceTag.ok){ + refetch() + dispatch({ + type: 'IS_TAG_UPDATE', + payload: {updated:!tagUpdated}, + }) + } + }).catch((e) => { }) + } + + const onSaveLable = (photoTagId) => { + editFaceTag({ + variables: { + photoTagId: photoTagId, + newName: tagName, + }, + }) + .then((res) => { + setEditLableId('') + setTagName(null) + if (res.data.editFaceTag.ok) { + refetch() + dispatch({ + type: 'IS_TAG_UPDATE', + payload: {updated:!tagUpdated}, + }) + } + }) + .catch((e) => { + setEditLableId('') + setTagName(null) + }) + } + + const onChangeLable = (event, photoTagId) => { + setTagName(event.target.value) + if (event.keyCode === 13) { + if (tagName) { + onSaveLable(photoTagId) + } + else { + setEditLableId('') + setTagName(null) + } + } + } + + const setVerifyPhoto = (photoTagId) => { + verifyPhoto({ + variables: { + photoTagId: photoTagId + }, + }) + .then((res) => { + if (res.data.verifyPhoto.ok) refetch() + }) + .catch((e) => { }) + } + + useEffect(() => { + if (ref?.current) { + ref.current.focus() + } + }, [editLableId]) -const BoundingBoxes = ({ boxes, className }) => { - const [editMode, setEditMode] = useState(false) return ( {boxes?.map((box, index) => { @@ -74,27 +191,57 @@ const BoundingBoxes = ({ boxes, className }) => { let height = box.sizeY * 100 + '%' return (
-
- {console.log(editMode)} - {box.name} -
- {className === 'face' && ( - <> - setEditMode(!editMode)} - /> - setEditMode(!editMode)} + { + !box.deleted ? editLableId == box.id ? + onChangeLable(e, box.id)} + ref={ref} /> - + : +
+ {box.name} +
: null + } + {className === 'face' && !box.deleted && ( +
+ { + editLableId == box.id ? + onSaveLable(box.id)} + /> + : + <> + { !box.verified && ( + onHandleBlock(box.id)} + /> + )} + { box.showVerifyIcon && ( + setVerifyPhoto(box.id)} + /> + )} + setEditLableId(box.id)} + /> + + } +
)}
) diff --git a/ui/src/components/PhotoDetail.js b/ui/src/components/PhotoDetail.js index d208443b..40393ed6 100644 --- a/ui/src/components/PhotoDetail.js +++ b/ui/src/components/PhotoDetail.js @@ -168,6 +168,10 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { } }, [photoId, prevNextPhotos, prevPhoto, nextPhoto]) + const setBoxColorClass = (tag) => { + return tag.deleted ? 'whiteBox' : tag.verified ? 'greenBox' : 'yellowBox'; + } + let boxes = { object: photo?.objectTags.map((objectTag) => { return { @@ -180,11 +184,16 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { }), face: photo?.personTags.map((tag) => { return { + id: tag.id, name: tag.tag.name, positionX: tag.positionX, positionY: tag.positionY, sizeX: tag.sizeX, sizeY: tag.sizeY, + verified: tag.verified, + deleted: tag.deleted, + boxColorClass: setBoxColorClass(tag), + showVerifyIcon: tag.showVerifyIcon, } }), } @@ -196,6 +205,7 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { boxes={showBoundingBox && boxes} next={nextPhoto} prev={prevPhoto} + refetch={refetch} />
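A note on the geometry behind the boxes objects assembled above: position_x/position_y on PhotoTag hold the centre of the face box as a fraction of the image's width and height, and size_x/size_y hold its fractional width and height, which is why the UI derives left/top by subtracting half the size. A minimal standalone sketch of the round trip, assuming the centre-based convention the face model uses when saving PhotoTags (the helper names here are ours, not part of the patch):

    def detector_box_to_phototag(box, image_width, image_height):
        """Convert an MTCNN-style [x, y, w, h] pixel box (top-left corner plus size) to centre-normalised PhotoTag values."""
        x, y, w, h = box
        return {
            'position_x': (x + w / 2) / image_width,
            'position_y': (y + h / 2) / image_height,
            'size_x': w / image_width,
            'size_y': h / image_height,
        }

    def phototag_to_css(tag):
        """Recover the CSS percentage offsets that BoundingBoxes.js computes when rendering a box."""
        return {
            'left': f"{(tag['position_x'] - tag['size_x'] / 2) * 100}%",
            'top': f"{(tag['position_y'] - tag['size_y'] / 2) * 100}%",
            'width': f"{tag['size_x'] * 100}%",
            'height': f"{tag['size_y'] * 100}%",
        }

Keeping the stored values normalised rather than in pixels means the same tag renders correctly at any thumbnail size or zoom level.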
!personTags.deleted); + return (
@@ -306,7 +309,7 @@ const PhotoMetadata = ({ />
)} - {photo.personTags.length > 0 && ( + {personTagsList.length > 0 && (

People @@ -317,7 +320,7 @@ const PhotoMetadata = ({ )}

    - {photo.personTags.map((photoTag, index) => ( + {personTagsList.map((photoTag, index) => (
  • {photoTag.tag.name}
  • ))}
diff --git a/ui/src/components/ZoomableImage.js b/ui/src/components/ZoomableImage.js index 504635f9..23c73ca0 100644 --- a/ui/src/components/ZoomableImage.js +++ b/ui/src/components/ZoomableImage.js @@ -5,7 +5,7 @@ import { TransformWrapper, TransformComponent } from 'react-zoom-pan-pinch' import { useSwipeable } from 'react-swipeable' import { useSelector } from 'react-redux' -import BoundingBoxes from './BoundingBoxes' +import BoundingBoxes from './BoundingBoxes' import Spinner from './Spinner' import { getPrevNextPhotos } from '../stores/photos/selector' @@ -78,7 +78,7 @@ const Container = styled('div')` } ` -const ZoomableImage = ({ photoId, boxes, next, prev }) => { +const ZoomableImage = ({ photoId, boxes, next, prev, refetch}) => { const [scale, setScale] = useState(1) const [zoom, setZoom] = useState(false) const [loading, setLoading] = useState(true) @@ -172,7 +172,7 @@ const ZoomableImage = ({ photoId, boxes, next, prev }) => { className={displayImage ? ' display' : undefined} key={index} > - + ))}
@@ -206,14 +206,20 @@ ZoomableImage.propTypes = { ), face: PropTypes.arrayOf( PropTypes.shape({ + id: PropTypes.string, name: PropTypes.string, positionX: PropTypes.number, positionY: PropTypes.number, sizeX: PropTypes.number, sizeY: PropTypes.number, + verified: PropTypes.bool, + deleted: PropTypes.bool, + boxColorClass: PropTypes.string, + showVerifyIcon: PropTypes.bool, }) ), }), + refetch: PropTypes.func, } export default ZoomableImage diff --git a/ui/src/containers/FiltersContainer.js b/ui/src/containers/FiltersContainer.js index b2f5944e..8da3449e 100644 --- a/ui/src/containers/FiltersContainer.js +++ b/ui/src/containers/FiltersContainer.js @@ -5,6 +5,7 @@ import gql from 'graphql-tag' import Filters from '../components/Filters' import Spinner from '../components/Spinner' import { getActiveLibrary } from '../stores/libraries/selector' +import { isTagUpdated } from "../stores/tag/selector"; const GET_FILTERS = gql` query AllFilters($libraryId: UUID, $multiFilter: String) { @@ -79,11 +80,11 @@ const FiltersContainer = ({ }) => { const user = useSelector((state) => state.user) // Using user here from Redux store so we can wait for any JWT tokens to be refreshed before running GraphQL queries that require authentication const activeLibrary = useSelector(getActiveLibrary) + const tagUpdated = useSelector(isTagUpdated) let filtersStr = '' if (activeLibrary) { filtersStr = `${selectedFilters.map((filter) => filter.id).join(' ')}` } - let variables = {} variables = { libraryId: activeLibrary?.id, multiFilter: filtersStr } const { loading, error, data, refetch } = useQuery( @@ -95,7 +96,7 @@ const FiltersContainer = ({ ) useEffect(() => { refetch() - }, [activeLibrary, refetch]) + }, [activeLibrary, refetch, tagUpdated]) const getFilterdData = (type, array) => { const filterArr = selectedFilters.filter((s) => s.group === type) diff --git a/ui/src/containers/PhotoDetailContainer.js b/ui/src/containers/PhotoDetailContainer.js index bd02f811..d867b68b 100644 --- a/ui/src/containers/PhotoDetailContainer.js +++ b/ui/src/containers/PhotoDetailContainer.js @@ -63,6 +63,9 @@ const GET_PHOTO = gql` positionY sizeX sizeY + verified + deleted + showVerifyIcon } colorTags { id diff --git a/ui/src/graphql/tag.js b/ui/src/graphql/tag.js index 817c7fef..452923e6 100644 --- a/ui/src/graphql/tag.js +++ b/ui/src/graphql/tag.js @@ -23,3 +23,31 @@ export const REMOVE_TAG = gql` } } ` +export const EDIT_FACE_TAG = gql` + mutation editFaceTag( + $photoTagId: ID!, + $newName: String!, + ) { + editFaceTag(photoTagId:$photoTagId, newName:$newName) { + ok + } + } +` +export const BLOCK_FACE_TAG = gql` + mutation blockFaceTag( + $photoTagId: ID!, + ) { + blockFaceTag(photoTagId:$photoTagId) { + ok + } + } +` +export const VERIFY_FACE_TAG = gql` + mutation verifyPhoto( + $photoTagId: ID!, + ) { + verifyPhoto(photoTagId:$photoTagId) { + ok + } + } +` \ No newline at end of file diff --git a/ui/src/static/images/block_white.svg b/ui/src/static/images/block_black.svg similarity index 83% rename from ui/src/static/images/block_white.svg rename to ui/src/static/images/block_black.svg index 4a3302da..ad9c488c 100644 --- a/ui/src/static/images/block_white.svg +++ b/ui/src/static/images/block_black.svg @@ -1 +1 @@ - \ No newline at end of file + \ No newline at end of file diff --git a/ui/src/static/images/done_black.svg b/ui/src/static/images/done_black.svg new file mode 100644 index 00000000..4a715722 --- /dev/null +++ b/ui/src/static/images/done_black.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git 
a/ui/src/static/images/edit_white.svg b/ui/src/static/images/edit_white.svg deleted file mode 100644 index 6c7849fa..00000000 --- a/ui/src/static/images/edit_white.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/ui/src/stores/index.js b/ui/src/stores/index.js index 621e161f..ac8e5741 100644 --- a/ui/src/stores/index.js +++ b/ui/src/stores/index.js @@ -3,12 +3,14 @@ import layout from './layout' import libraries from './libraries' import photos from './photos' import user from './user' +import isTagUpdated from "./tag"; const reducers = combineReducers({ layout, libraries, photos, user, + isTagUpdated, }) export default reducers diff --git a/ui/src/stores/tag/index.js b/ui/src/stores/tag/index.js new file mode 100644 index 00000000..c4a477e8 --- /dev/null +++ b/ui/src/stores/tag/index.js @@ -0,0 +1,15 @@ +const IS_TAG_UPDATE = 'IS_TAG_UPDATE' + +const initialState = {updated:false} + +const isTagUpdated = (state = initialState, action = {}) => { + switch (action.type) { + case IS_TAG_UPDATE: + state.updated = action.payload.updated + return state + default: + return state + } +} + +export default isTagUpdated diff --git a/ui/src/stores/tag/selector.js b/ui/src/stores/tag/selector.js new file mode 100644 index 00000000..45a37586 --- /dev/null +++ b/ui/src/stores/tag/selector.js @@ -0,0 +1,3 @@ +export const isTagUpdated = (state) => { + return state.isTagUpdated.updated + } \ No newline at end of file From ba01f635894150449a817037829ad7b862312c48 Mon Sep 17 00:00:00 2001 From: GyanP Date: Wed, 16 Jun 2021 22:20:51 +0530 Subject: [PATCH 053/110] bug fix - set lat long when user dragging the map --- ui/src/components/MapView.js | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/ui/src/components/MapView.js b/ui/src/components/MapView.js index f9d3988a..1bc8319e 100644 --- a/ui/src/components/MapView.js +++ b/ui/src/components/MapView.js @@ -46,6 +46,10 @@ const MapView = ({ localStorage.setItem('lat', mapEvents.getCenter().lat) localStorage.setItem('lng', mapEvents.getCenter().lng) }, + dragend: () => { + localStorage.setItem('lat', mapEvents.getCenter().lat) + localStorage.setItem('lng', mapEvents.getCenter().lng) + }, }); const position = [latState? latState : mapEvents.getCenter().lat, lngState? lngState : mapEvents.getCenter().lng] const zoom = zoomState? 
zoomState : mapEvents.getZoom() From cb2f656cd9d903b8e89d93332a2f0019e1b25496 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Thu, 17 Jun 2021 00:12:51 +0100 Subject: [PATCH 054/110] Minor style tweaks --- ui/src/components/BoundingBoxes.js | 194 ++++++++++++++--------------- 1 file changed, 91 insertions(+), 103 deletions(-) diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index 42b5a832..b0a1d1e3 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -6,8 +6,7 @@ import { ReactComponent as EditIcon } from '../static/images/edit.svg' import { ReactComponent as BlockIcon } from '../static/images/block_black.svg' import { ReactComponent as DoneIcon } from '../static/images/done_black.svg' import { EDIT_FACE_TAG, BLOCK_FACE_TAG, VERIFY_FACE_TAG } from '../graphql/tag' -import { isTagUpdated } from "../stores/tag/selector"; - +import { isTagUpdated } from '../stores/tag/selector' const Container = styled('div')` width: 100%; @@ -31,61 +30,50 @@ const Container = styled('div')` text-align: left; white-space: nowrap; pointer-events: all; - &:hover { - overflow: visible; - text-shadow: 0 0 2px #f00; - } } &.face { - &.yellowBox{ + &.yellowBox { border-color: rgba(255, 255, 0, 0.75); .FeatureLabel { color: #000; background-color: rgba(255, 255, 0, 0.75); - &:hover { - text-shadow: 0 0 2px #ff0; - } } } - &.greenBox{ - border-color: rgba(9, 119, 56, 0.9); + &.greenBox { + border-color: rgba(0, 255, 0, 0.75); .FeatureLabel { color: #000; - background-color : rgba(9, 119, 56, 0.9); - &:hover { - text-shadow: 0 0 2px #ff0; - } + background-color: rgba(0, 255, 0, 0.75); } } - &.whiteBox{ - border-color: rgba(202, 202, 191, 0.95); + &.whiteBox { + border-color: rgba(202, 202, 191, 0.5); } - .FeatureEditText{ - color:#000 !important; + .FeatureEditText { + color: #000 !important; + width: 100%; + border: 0; } - .icons{ + .icons { position: absolute; - bottom: 0px; - right:0; + bottom: 0; + right: 5px; - .FeatureIconEdit { - background: #fff; - border-radius: 50%; - padding: 3px; - margin: 0 1px; - } - .FeatureIconDelete { - background: red; - border-radius: 50%; - padding: 3px; - margin: 0 1px; - } - .FeatureIconDone { - background: #2ff16df0; - border-radius: 50%; - padding: 3px; - margin: 0 1px; - } + svg { + background: #fff; + border-radius: 50%; + padding: 3px; + margin: 0 1px; + cursor: pointer; + &.FeatureIconEdit { + } + &.FeatureIconDelete { + background: #f00; + } + &.FeatureIconDone { + background: #0f0; + } + } } } } @@ -109,22 +97,23 @@ const BoundingBoxes = ({ boxes, className, refetch }) => { const [blockFaceTag] = useMutation(BLOCK_FACE_TAG) const [verifyPhoto] = useMutation(VERIFY_FACE_TAG) const tagUpdated = useSelector(isTagUpdated) - + const onHandleBlock = (photoTagId) => { blockFaceTag({ variables: { - photoTagId: photoTagId + photoTagId: photoTagId, }, }) - .then((res) => { - if (res.data.blockFaceTag.ok){ - refetch() - dispatch({ - type: 'IS_TAG_UPDATE', - payload: {updated:!tagUpdated}, - }) - } - }).catch((e) => { }) + .then((res) => { + if (res.data.blockFaceTag.ok) { + refetch() + dispatch({ + type: 'IS_TAG_UPDATE', + payload: { updated: !tagUpdated }, + }) + } + }) + .catch((e) => {}) } const onSaveLable = (photoTagId) => { @@ -134,21 +123,21 @@ const BoundingBoxes = ({ boxes, className, refetch }) => { newName: tagName, }, }) - .then((res) => { - setEditLableId('') - setTagName(null) - if (res.data.editFaceTag.ok) { - refetch() - dispatch({ - type: 'IS_TAG_UPDATE', - payload: {updated:!tagUpdated}, - 
}) - } - }) - .catch((e) => { - setEditLableId('') - setTagName(null) - }) + .then((res) => { + setEditLableId('') + setTagName(null) + if (res.data.editFaceTag.ok) { + refetch() + dispatch({ + type: 'IS_TAG_UPDATE', + payload: { updated: !tagUpdated }, + }) + } + }) + .catch((e) => { + setEditLableId('') + setTagName(null) + }) } const onChangeLable = (event, photoTagId) => { @@ -156,8 +145,7 @@ const BoundingBoxes = ({ boxes, className, refetch }) => { if (event.keyCode === 13) { if (tagName) { onSaveLable(photoTagId) - } - else { + } else { setEditLableId('') setTagName(null) } @@ -167,13 +155,13 @@ const BoundingBoxes = ({ boxes, className, refetch }) => { const setVerifyPhoto = (photoTagId) => { verifyPhoto({ variables: { - photoTagId: photoTagId + photoTagId: photoTagId, }, }) - .then((res) => { - if (res.data.verifyPhoto.ok) refetch() - }) - .catch((e) => { }) + .then((res) => { + if (res.data.verifyPhoto.ok) refetch() + }) + .catch((e) => {}) } useEffect(() => { @@ -195,8 +183,8 @@ const BoundingBoxes = ({ boxes, className, refetch }) => { key={index} style={{ left: left, top: top, width: width, height: height }} > - { - !box.deleted ? editLableId == box.id ? + {!box.deleted ? ( + editLableId == box.id ? ( { onKeyUp={(e) => onChangeLable(e, box.id)} ref={ref} /> - : + ) : (
{box.name} -
: null - } +
+ ) + ) : null} {className === 'face' && !box.deleted && (
- { - editLableId == box.id ? - onSaveLable(box.id)} - /> - : - <> - { !box.verified && ( - onSaveLable(box.id)} + /> + ) : ( + <> + {!box.verified && ( + onHandleBlock(box.id)} - /> - )} - { box.showVerifyIcon && ( - + )} + {box.showVerifyIcon && ( + setVerifyPhoto(box.id)} /> - )} - setEditLableId(box.id)} - /> - - } -
+ )} + setEditLableId(box.id)} + /> + + )} +
)}
) From dd4ce4e21ce91bb7e85551a7d2f8de2d71200d2d Mon Sep 17 00:00:00 2001 From: GyanP Date: Thu, 17 Jun 2021 17:37:28 +0530 Subject: [PATCH 055/110] issue fixed --- ui/src/components/SearchInput.js | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/ui/src/components/SearchInput.js b/ui/src/components/SearchInput.js index 56628d44..e8022a56 100644 --- a/ui/src/components/SearchInput.js +++ b/ui/src/components/SearchInput.js @@ -71,9 +71,15 @@ const SearchInput = ({ const onKeyDown = (e) => { if (e.keyCode === 13) { - onSearchTextChange(filteredOptions[activeOption].name) + // onSearchTextChange(filteredOptions[activeOption].name) + onSearchTextChange('') setActiveOption(0) setShowOptions(false) + onFilterToggle( + filteredOptions[activeOption].id, + filteredOptions[activeOption].type, + filteredOptions[activeOption].name + ) } else if (e.keyCode === 38) { if (activeOption === 0) return setActiveOption(activeOption - 1) @@ -87,7 +93,13 @@ const SearchInput = ({ setActiveOption(0) setFilteredOptions([]) setShowOptions(false) - onSearchTextChange(filteredOptions[index].name) + // onSearchTextChange(filteredOptions[index].name) + onSearchTextChange('') + onFilterToggle( + filteredOptions[index].id, + filteredOptions[index].type, + filteredOptions[index].name + ) } let optionList; From ce19af198674db85e38c84aec4888b41a65cf527 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Thu, 17 Jun 2021 00:14:22 +0100 Subject: [PATCH 056/110] Minor style tweaks --- ui/src/components/BoundingBoxes.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index b0a1d1e3..7d0cd24c 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -56,8 +56,8 @@ const Container = styled('div')` } .icons { position: absolute; - bottom: 0; - right: 5px; + bottom: -3px; + right: 2px; svg { background: #fff; From 76dd410f5d499e8c10c071539e0c16027cb9ecdf Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Thu, 17 Jun 2021 21:18:01 +0100 Subject: [PATCH 057/110] Minor style tweaks --- ui/src/components/BoundingBoxes.js | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index 7d0cd24c..7df1d636 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -32,6 +32,7 @@ const Container = styled('div')` pointer-events: all; } &.face { + cursor: default; &.yellowBox { border-color: rgba(255, 255, 0, 0.75); .FeatureLabel { @@ -53,6 +54,7 @@ const Container = styled('div')` color: #000 !important; width: 100%; border: 0; + padding: 2px 4px; } .icons { position: absolute; @@ -213,6 +215,7 @@ const BoundingBoxes = ({ boxes, className, refetch }) => { alt="Block" className="FeatureIconDelete" onClick={() => onHandleBlock(box.id)} + title="Reject automatic face tag" /> )} {box.showVerifyIcon && ( @@ -220,12 +223,14 @@ const BoundingBoxes = ({ boxes, className, refetch }) => { alt="Done" className="FeatureIconDone" onClick={() => setVerifyPhoto(box.id)} + title="Approve automatic face tag" /> )} setEditLableId(box.id)} + title="Edit person’s name" /> )} From 6fdcb8d2dc3735440af420ca625b1195bc92e23a Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Thu, 17 Jun 2021 21:41:11 +0100 Subject: [PATCH 058/110] Made face detection job respect setting. 
Changed wording and position in settings and onboarding --- photonix/classifiers/info.py | 3 ++- photonix/photos/utils/classification.py | 10 +++++---- ui/src/components/BoundingBoxes.js | 4 ++-- ui/src/components/Settings.js | 25 ++++++++++----------- ui/src/components/onboarding/Step5Search.js | 24 ++++++++++---------- 5 files changed, 34 insertions(+), 32 deletions(-) diff --git a/photonix/classifiers/info.py b/photonix/classifiers/info.py index c14650d0..93f82a2f 100644 --- a/photonix/classifiers/info.py +++ b/photonix/classifiers/info.py @@ -1,6 +1,7 @@ CLASSIFIERS = [ 'color', 'location', - 'object', + 'face', 'style', + 'object', ] diff --git a/photonix/photos/utils/classification.py b/photonix/photos/utils/classification.py index c1713b0a..dae25823 100644 --- a/photonix/photos/utils/classification.py +++ b/photonix/photos/utils/classification.py @@ -11,9 +11,9 @@ CLASSIFIERS = [ 'color', 'location', - 'object', - 'style', 'face', + 'style', + 'object', ] @@ -92,10 +92,12 @@ def run(self, loop=True): task_queryset = Task.objects.filter(library__classification_color_enabled=True, type=self.task_type, status='P') elif self.task_type == 'classify.location': task_queryset = Task.objects.filter(library__classification_location_enabled=True, type=self.task_type, status='P') - elif self.task_type == 'classify.object': - task_queryset = Task.objects.filter(library__classification_object_enabled=True, type=self.task_type, status='P') + elif self.task_type == 'classify.face': + task_queryset = Task.objects.filter(library__classification_face_enabled=True, type=self.task_type, status='P') elif self.task_type == 'classify.style': task_queryset = Task.objects.filter(library__classification_style_enabled=True, type=self.task_type, status='P') + elif self.task_type == 'classify.object': + task_queryset = Task.objects.filter(library__classification_object_enabled=True, type=self.task_type, status='P') else: task_queryset = Task.objects.filter(type=self.task_type, status='P') for task in task_queryset[:8]: diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index 7df1d636..15736e6c 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -186,7 +186,7 @@ const BoundingBoxes = ({ boxes, className, refetch }) => { style={{ left: left, top: top, width: width, height: height }} > {!box.deleted ? ( - editLableId == box.id ? ( + editLableId === box.id ? ( { ) : null} {className === 'face' && !box.deleted && (
- {editLableId == box.id ? ( + {editLableId === box.id ? ( { }) + }).catch((e) => {}) return key case 'classificationLocationEnabled': settingUpdateLocation({ @@ -87,7 +87,7 @@ export default function Settings() { newSettings.classificationLocationEnabled, libraryId: activeLibrary?.id, }, - }).catch((e) => { }) + }).catch((e) => {}) return key case 'classificationObjectEnabled': settingUpdateObject({ @@ -96,7 +96,7 @@ export default function Settings() { newSettings.classificationObjectEnabled, libraryId: activeLibrary?.id, }, - }).catch((e) => { }) + }).catch((e) => {}) return key case 'classificationColorEnabled': settingUpdateColor({ @@ -104,16 +104,15 @@ export default function Settings() { classificationColorEnabled: newSettings.classificationColorEnabled, libraryId: activeLibrary?.id, }, - }).catch((e) => { }) + }).catch((e) => {}) return key case 'classificationFaceEnabled': settingUpdateFace({ variables: { - classificationFaceEnabled: - newSettings.classificationFaceEnabled, + classificationFaceEnabled: newSettings.classificationFaceEnabled, libraryId: activeLibrary?.id, }, - }).catch((e) => { }) + }).catch((e) => {}) return key default: return null @@ -136,7 +135,7 @@ export default function Settings() { sourceFolder: newSettings.sourceDirs, libraryId: activeLibrary?.id, }, - }).catch((e) => { }) + }).catch((e) => {}) } const [settingUpdateStyle] = useMutation(SETTINGS_STYLE) const [settingUpdateColor] = useMutation(SETTINGS_COLOR) diff --git a/ui/src/components/onboarding/Step5Search.js b/ui/src/components/onboarding/Step5Search.js index af03b415..005f4d69 100644 --- a/ui/src/components/onboarding/Step5Search.js +++ b/ui/src/components/onboarding/Step5Search.js @@ -73,38 +73,38 @@ const Step5Search = ({ history }) => { } /> From 9ec1653d04115f3d227f5ebe0bef777861342e46 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Thu, 17 Jun 2021 23:30:44 +0100 Subject: [PATCH 059/110] Fixes migrations --- .../migrations/0004_alter_user_first_name.py | 18 ++++++++++++++++++ ...0603_1442.py => 0009_auto_20210617_2218.py} | 9 +++++++-- ...0009_library_classification_face_enabled.py | 18 ------------------ 3 files changed, 25 insertions(+), 20 deletions(-) create mode 100644 photonix/accounts/migrations/0004_alter_user_first_name.py rename photonix/photos/migrations/{0008_auto_20210603_1442.py => 0009_auto_20210617_2218.py} (78%) delete mode 100644 photonix/photos/migrations/0009_library_classification_face_enabled.py diff --git a/photonix/accounts/migrations/0004_alter_user_first_name.py b/photonix/accounts/migrations/0004_alter_user_first_name.py new file mode 100644 index 00000000..7efbf1a9 --- /dev/null +++ b/photonix/accounts/migrations/0004_alter_user_first_name.py @@ -0,0 +1,18 @@ +# Generated by Django 3.2.3 on 2021-06-17 22:18 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('accounts', '0003_add_user_signup_flds'), + ] + + operations = [ + migrations.AlterField( + model_name='user', + name='first_name', + field=models.CharField(blank=True, max_length=150, verbose_name='first name'), + ), + ] diff --git a/photonix/photos/migrations/0008_auto_20210603_1442.py b/photonix/photos/migrations/0009_auto_20210617_2218.py similarity index 78% rename from photonix/photos/migrations/0008_auto_20210603_1442.py rename to photonix/photos/migrations/0009_auto_20210617_2218.py index 96aa52ee..9a1db00c 100644 --- a/photonix/photos/migrations/0008_auto_20210603_1442.py +++ b/photonix/photos/migrations/0009_auto_20210617_2218.py @@ -1,4 +1,4 
@@ -# Generated by Django 3.2.3 on 2021-06-03 14:42 +# Generated by Django 3.2.3 on 2021-06-17 22:18 from django.db import migrations, models @@ -6,10 +6,15 @@ class Migration(migrations.Migration): dependencies = [ - ('photos', '0007_add_library_ForeignKey'), + ('photos', '0008_auto_20210604_1842'), ] operations = [ + migrations.AddField( + model_name='library', + name='classification_face_enabled', + field=models.BooleanField(default=False, help_text='Run face detection on photos?'), + ), migrations.AddField( model_name='phototag', name='deleted', diff --git a/photonix/photos/migrations/0009_library_classification_face_enabled.py b/photonix/photos/migrations/0009_library_classification_face_enabled.py deleted file mode 100644 index 4fc11522..00000000 --- a/photonix/photos/migrations/0009_library_classification_face_enabled.py +++ /dev/null @@ -1,18 +0,0 @@ -# Generated by Django 3.2.3 on 2021-06-10 12:52 - -from django.db import migrations, models - - -class Migration(migrations.Migration): - - dependencies = [ - ('photos', '0008_auto_20210603_1442'), - ] - - operations = [ - migrations.AddField( - model_name='library', - name='classification_face_enabled', - field=models.BooleanField(default=False, help_text='Run face detection on photos?'), - ), - ] From 4ac15a3f0241f8870f65c0b6d75bbb379a1b1347 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Thu, 17 Jun 2021 23:56:15 +0100 Subject: [PATCH 060/110] Create FUNDING.yml --- .github/FUNDING.yml | 1 + 1 file changed, 1 insertion(+) create mode 100644 .github/FUNDING.yml diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml new file mode 100644 index 00000000..e7ec037d --- /dev/null +++ b/.github/FUNDING.yml @@ -0,0 +1 @@ +github: [photonixapp] From 8b3e654c0c05630c6ac3a6687f28e59a57b9ea1f Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Fri, 18 Jun 2021 22:40:37 +0100 Subject: [PATCH 061/110] Build dependencies for OpenCV source compilation for ARMv7 --- docker/Dockerfile.prd | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker/Dockerfile.prd b/docker/Dockerfile.prd index 8399dc82..55d4574c 100644 --- a/docker/Dockerfile.prd +++ b/docker/Dockerfile.prd @@ -5,6 +5,7 @@ FROM ${ARCH}python:3.8.9-slim-buster as builder RUN apt-get update && \ apt-get install -y \ build-essential \ + cmake \ curl \ gfortran \ gnupg \ @@ -18,6 +19,7 @@ RUN apt-get update && \ liblapack-dev \ liblapack3 \ libpq-dev \ + libssl-dev \ libtiff5-dev \ && \ apt-get clean && \ From 6ffe443272f59e7d5e0026c833f056f29476bcf2 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Sat, 19 Jun 2021 00:49:25 +0100 Subject: [PATCH 062/110] Fixes ANN retraining version date loading --- .../commands/retrain_face_similarity_index.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/photonix/photos/management/commands/retrain_face_similarity_index.py b/photonix/photos/management/commands/retrain_face_similarity_index.py index e7e1227b..6ac72870 100644 --- a/photonix/photos/management/commands/retrain_face_similarity_index.py +++ b/photonix/photos/management/commands/retrain_face_similarity_index.py @@ -16,15 +16,15 @@ class Command(BaseCommand): help = 'Creates Approximate Nearest Neighbour (ANN) search index for quickly finding closest face without having to compare one-by-one.' 
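For context on the hunk that follows: the retrain command previously kept a single 'retrained_version.txt' stamp shared by all libraries, so retraining one library could make every other library's index look up to date. The fix moves the stamp inside the per-library loop, giving each library its own '<library_id>_retrained_version.txt' file holding a UTC timestamp serialised as %Y%m%d%H%M%S. A small sketch of that stamp pattern in isolation; the read side mirrors the diff, while write_retrained_version is our assumption, since the writer is not shown in this excerpt:

    from datetime import datetime, timezone
    from pathlib import Path

    def read_retrained_version(model_dir, library_id):
        """Return when this library's ANN index was last rebuilt, or None if never."""
        version_file = Path(model_dir) / 'face' / f'{library_id}_retrained_version.txt'
        if not version_file.exists():
            return None
        contents = version_file.read_text().strip()
        return datetime.strptime(contents, '%Y%m%d%H%M%S').replace(tzinfo=timezone.utc)

    def write_retrained_version(model_dir, library_id, when=None):
        """Stamp the retrain time for one library (hypothetical writer, mirroring the reader above)."""
        when = when or datetime.now(timezone.utc)
        version_file = Path(model_dir) / 'face' / f'{library_id}_retrained_version.txt'
        version_file.parent.mkdir(parents=True, exist_ok=True)
        version_file.write_text(when.strftime('%Y%m%d%H%M%S'))
        return when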
def retrain_face_similarity_index(self): - version_file = Path(settings.MODEL_DIR) / 'face' / 'retrained_version.txt' - version_date = None + for library in Library.objects.all(): + version_file = Path(settings.MODEL_DIR) / 'face' / f'{library.id}_retrained_version.txt' + version_date = None - if os.path.exists(version_file): - with open(version_file) as f: - contents = f.read().strip() - version_date = datetime.strptime(contents, '%Y%m%d%H%M%S').replace(tzinfo=timezone.utc) + if os.path.exists(version_file): + with open(version_file) as f: + contents = f.read().strip() + version_date = datetime.strptime(contents, '%Y%m%d%H%M%S').replace(tzinfo=timezone.utc) - for library in Library.objects.all(): start = time() print(f'Updating ANN index for Library {library.id}') From f01bb76fabba431c6450225cf2141b6a2406083b Mon Sep 17 00:00:00 2001 From: GyanP Date: Mon, 21 Jun 2021 22:51:16 +0530 Subject: [PATCH 063/110] task completed --- ui/src/components/BoundingBoxes.js | 62 ++++++++++++++++++------------ ui/src/components/PhotoDetail.js | 6 ++- ui/src/components/ZoomableImage.js | 34 ++++++++++++++-- 3 files changed, 73 insertions(+), 29 deletions(-) diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index 15736e6c..a83f5cd2 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -78,6 +78,9 @@ const Container = styled('div')` } } } + &.hideBox { + border: none; + } } @media all and (max-width: 1000px) { @@ -90,7 +93,7 @@ const Container = styled('div')` } } ` -const BoundingBoxes = ({ boxes, className, refetch }) => { +const BoundingBoxes = ({ boxes, className, refetch, showBoundingBox }) => { const dispatch = useDispatch() const ref = useRef(null) const [editLableId, setEditLableId] = useState('') @@ -100,7 +103,8 @@ const BoundingBoxes = ({ boxes, className, refetch }) => { const [verifyPhoto] = useMutation(VERIFY_FACE_TAG) const tagUpdated = useSelector(isTagUpdated) - const onHandleBlock = (photoTagId) => { + const onHandleBlock = (event, photoTagId) => { + event.stopPropagation(); blockFaceTag({ variables: { photoTagId: photoTagId, @@ -118,7 +122,8 @@ const BoundingBoxes = ({ boxes, className, refetch }) => { .catch((e) => {}) } - const onSaveLable = (photoTagId) => { + const onSaveLable = (event, photoTagId) => { + event.stopPropagation(); editFaceTag({ variables: { photoTagId: photoTagId, @@ -142,11 +147,11 @@ const BoundingBoxes = ({ boxes, className, refetch }) => { }) } - const onChangeLable = (event, photoTagId) => { + const onChangeLable = (event, photoTagId) => { setTagName(event.target.value) if (event.keyCode === 13) { if (tagName) { - onSaveLable(photoTagId) + onSaveLable(event,photoTagId) } else { setEditLableId('') setTagName(null) @@ -154,7 +159,8 @@ const BoundingBoxes = ({ boxes, className, refetch }) => { } } - const setVerifyPhoto = (photoTagId) => { + const setVerifyPhoto = (event, photoTagId) => { + event.stopPropagation(); verifyPhoto({ variables: { photoTagId: photoTagId, @@ -172,6 +178,11 @@ const BoundingBoxes = ({ boxes, className, refetch }) => { } }, [editLableId]) + const updateEditState = (event, boxId) => { + event.stopPropagation(); + setEditLableId(boxId) + } + return ( {boxes?.map((box, index) => { @@ -181,32 +192,33 @@ const BoundingBoxes = ({ boxes, className, refetch }) => { let height = box.sizeY * 100 + '%' return (
- {!box.deleted ? ( - editLableId === box.id ? ( - onChangeLable(e, box.id)} - ref={ref} - /> - ) : ( -
- {box.name} -
+ {showBoundingBox && !box.deleted && ( + editLableId === box.id ? ( + onChangeLable(e, box.id)} + ref={ref} + /> + ) : ( +
+ {box.name} +
+ ) ) - ) : null} + } {className === 'face' && !box.deleted && (
{editLableId === box.id ? ( onSaveLable(box.id)} + onClick={(e) => onSaveLable(e, box.id)} /> ) : ( <> @@ -214,7 +226,7 @@ const BoundingBoxes = ({ boxes, className, refetch }) => { onHandleBlock(box.id)} + onClick={(e) => onHandleBlock(e, box.id)} title="Reject automatic face tag" /> )} @@ -222,14 +234,14 @@ const BoundingBoxes = ({ boxes, className, refetch }) => { setVerifyPhoto(box.id)} + onClick={(e) => setVerifyPhoto(e, box.id)} title="Approve automatic face tag" /> )} setEditLableId(box.id)} + onClick={(e) => updateEditState(e, box.id)} title="Edit person’s name" /> diff --git a/ui/src/components/PhotoDetail.js b/ui/src/components/PhotoDetail.js index 936ac435..1c4ed4ff 100644 --- a/ui/src/components/PhotoDetail.js +++ b/ui/src/components/PhotoDetail.js @@ -102,6 +102,7 @@ const Container = styled('div')` const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { const safeArea = useSelector(getSafeArea) + const [showFaceIcons, setShowFaceIcons] = useState(true) const [showBoundingBox, setShowBoundingBox] = useLocalStorageState( 'showObjectBoxes', true @@ -202,7 +203,10 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { { +const ZoomableImage = ({ + photoId, + boxes, + next, + prev, + refetch, + showBoundingBox, + showFaceIcons, + setShowFaceIcons, +}) => { const [scale, setScale] = useState(1) const [zoom, setZoom] = useState(false) const [loading, setLoading] = useState(true) const [displayImage, setDisplayImage] = useState(false) + let clickTimeOut = null; const prevNextPhotos = useSelector((state) => getPrevNextPhotos(state, photoId) @@ -139,6 +149,19 @@ const ZoomableImage = ({ photoId, boxes, next, prev, refetch}) => { } } + // To handle icon show hide on single click. + const showHideIcons = (event) => { + if(clickTimeOut !== null){ + clearTimeout(clickTimeOut) + }else{ + clickTimeOut = setTimeout(()=>{ + setShowFaceIcons(!showFaceIcons) + clearTimeout(clickTimeOut) + clickTimeOut=null + },300) + } + } + return ( {
-
+
{ className={displayImage ? ' display' : undefined} key={index} > - + ))}
From 58446be155fdfe947e40c8e0973a5b6f2db57c82 Mon Sep 17 00:00:00 2001 From: GyanP Date: Tue, 22 Jun 2021 18:14:00 +0530 Subject: [PATCH 064/110] resolved error requestAnimationFrame on dubble click on photo --- ui/src/components/ZoomableImage.js | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ui/src/components/ZoomableImage.js b/ui/src/components/ZoomableImage.js index 23c73ca0..73eb5d90 100644 --- a/ui/src/components/ZoomableImage.js +++ b/ui/src/components/ZoomableImage.js @@ -135,7 +135,10 @@ const ZoomableImage = ({ photoId, boxes, next, prev, refetch}) => { if (e.scale === 1 && zoom) { setZoom(false) } else if (e.scale > 1 && !zoom) { - setZoom(true) + // setZoom(true) + setTimeout(() => { + setZoom(true); + }, 200); } } From 0e8d3ce4c41ba2705915d02c529fb7ce4d3ec017 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Tue, 22 Jun 2021 22:02:06 +0100 Subject: [PATCH 065/110] Face and event model tidy-ups --- photonix/classifiers/event/model.py | 1 - photonix/classifiers/face/model.py | 10 +++++++++- ui/src/components/BoundingBoxes.js | 4 +++- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/photonix/classifiers/event/model.py b/photonix/classifiers/event/model.py index be82592d..4f0bef21 100644 --- a/photonix/classifiers/event/model.py +++ b/photonix/classifiers/event/model.py @@ -23,7 +23,6 @@ def predict(self, image_file): datetime.date(date_taken.year, 12, 31): "New Year Start", datetime.date(date_taken.year, 1, 1): "New Year End", } - date_taken = datetime.datetime(date_taken.year, 12, 31, 2, 30) if events.get(date_taken.date()): if events.get(date_taken.date()).startswith("New Year"): start_of_day = datetime.datetime.combine(datetime.date(date_taken.year, 12, 31), datetime.datetime.min.time()) diff --git a/photonix/classifiers/face/model.py b/photonix/classifiers/face/model.py index ad811d54..2e41d733 100644 --- a/photonix/classifiers/face/model.py +++ b/photonix/classifiers/face/model.py @@ -267,9 +267,17 @@ def run_on_photo(photo_id): if result.get('closest_distance', 999) < DISTANCE_THRESHOLD: tag = Tag.objects.get(id=result['closest_tag'], library=photo.library, type='F') print(f'MATCHED {tag.name}') + # Otherwise create new tag else: - tag = get_or_create_tag(library=photo.library, name=f'Unknown person {randint(0, 999999):06d}', type='F', source='C') + while True: + random_name = f'Unknown person {randint(0, 999999):06d}' + try: + Tag.objects.get(library=photo.library, name=random_name, type='F', source='C') + except Tag.DoesNotExist: + tag = Tag(library=photo.library, name=random_name, type='F', source='C') + tag.save() + break x = (result['box'][0] + (result['box'][2] / 2)) / photo.base_file.width y = (result['box'][1] + (result['box'][3] / 2)) / photo.base_file.height diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index 15736e6c..e297279a 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -33,6 +33,7 @@ const Container = styled('div')` } &.face { cursor: default; + overflow: visible; &.yellowBox { border-color: rgba(255, 255, 0, 0.75); .FeatureLabel { @@ -58,8 +59,9 @@ const Container = styled('div')` } .icons { position: absolute; - bottom: -3px; + bottom: -2px; right: 2px; + width: max-content; svg { background: #fff; From b42865efdee4a15d3973eca5d7f4317278f11fd9 Mon Sep 17 00:00:00 2001 From: GyanP Date: Wed, 23 Jun 2021 18:19:19 +0530 Subject: [PATCH 066/110] Task completed --- ui/src/components/MapView.js | 31 ++++++++++++++++++------------- 
From b42865efdee4a15d3973eca5d7f4317278f11fd9 Mon Sep 17 00:00:00 2001 From: GyanP Date: Wed, 23 Jun 2021 18:19:19 +0530 Subject: [PATCH 066/110] Map markers show photo thumbnails and open the photo detail page --- ui/src/components/MapView.js | 31 ++++++++++++++++++-------------
ui/src/static/css/Map.css | 4 ++++ 2 files changed, 22 insertions(+), 13 deletions(-) diff --git a/ui/src/components/MapView.js b/ui/src/components/MapView.js index 1bc8319e..c479c7c2 100644 --- a/ui/src/components/MapView.js +++ b/ui/src/components/MapView.js @@ -1,9 +1,10 @@ import React, {useEffect, useState} from 'react' import PropTypes from 'prop-types' -import { Link, useHistory } from 'react-router-dom' -import { MapContainer, Marker, Popup, TileLayer } from 'react-leaflet' +import { useHistory } from 'react-router-dom' +import { MapContainer, Marker, TileLayer } from 'react-leaflet' import MarkerClusterGroup from 'react-leaflet-markercluster' import {useMapEvent} from "react-leaflet"; +import L from "leaflet"; import '../static/css/Map.css' import 'react-leaflet-markercluster/dist/styles.min.css' // sass @@ -57,23 +58,27 @@ const MapView = ({ return null } + const getMarkerIcon = (photoThumbnail) => { + return new L.Icon({ + iconUrl: photoThumbnail, + iconSize: new L.Point(60, 75), + className: "leaflet-custom-icon" + }); + } + if (photos) { markers = photos.map((photo, idx) => photo.location ? ( - - - marker popup - - - + eventHandlers={{ + click: () => { + history.push(`/photo/${photo.id}`); + }, + }} + /> ) : null ) return ( diff --git a/ui/src/static/css/Map.css b/ui/src/static/css/Map.css index 46e843fc..a7078ed7 100644 --- a/ui/src/static/css/Map.css +++ b/ui/src/static/css/Map.css @@ -49,3 +49,7 @@ .leaflet-popup-close-button { display: none; } +.leaflet-custom-icon { + border: 3px solid rgba(255, 255, 255, 0.9); + border-radius: 50%; +} From 4c739a0e25b011c67b22e5ee96b945c10f3cccf9 Mon Sep 17 00:00:00 2001 From: GyanP Date: Thu, 24 Jun 2021 23:33:01 +0530 Subject: [PATCH 067/110] Fixed sign-in state handling during onboarding --- ui/src/components/ModalForm.js | 1 + ui/src/components/onboarding/Result.js | 1 + 2 files changed, 2 insertions(+) diff --git a/ui/src/components/ModalForm.js b/ui/src/components/ModalForm.js index 8d3c0af3..fec13b50 100644 --- a/ui/src/components/ModalForm.js +++ b/ui/src/components/ModalForm.js @@ -50,6 +50,7 @@ const ModalForm = ({ const [stepFiveRegistration] = useMutation(STEP_FIVE) const onSubmit = (data) => { if (nextStep === '/onboarding/step2') { + localStorage.setItem("isSignin", false); stepOneRegistration({ variables: { username: data.username, diff --git a/ui/src/components/onboarding/Result.js b/ui/src/components/onboarding/Result.js index d9d66664..ba0c4118 100644 --- a/ui/src/components/onboarding/Result.js +++ b/ui/src/components/onboarding/Result.js @@ -2,6 +2,7 @@ import React from 'react' const Result = () => { localStorage.setItem('isSignin', true) + sessionStorage.removeItem('__STATE_MACHINE__'); setTimeout(() => { window.location.reload() }, 2000) From d7a6fb656a7ce0832bf086359a29a6d3302a792e Mon Sep 17 00:00:00 2001 From: GyanP Date: Sat, 26 Jun 2021 10:26:02 +0530 Subject: [PATCH 068/110] Completed 3rd, 4th and 5th points in task --- photonix/photos/schema.py | 5 +++-- ui/src/components/BoundingBoxes.js | 35 +++++++++++++++++----------- ui/src/components/ZoomableImage.js | 32 ++++++++++++++++++------- 3 files changed, 49 insertions(+), 23 deletions(-) diff --git a/photonix/photos/schema.py b/photonix/photos/schema.py index 73bec9b0..5cb937a1 100644 --- a/photonix/photos/schema.py +++ b/photonix/photos/schema.py @@ -354,8 +354,8 @@ def resolve_all_person_tags(self, info, **kwargs): photos_list = filter_photos_queryset( filters, Photo.objects.filter(library__users__user=user), kwargs.get('library_id')) - return Tag.objects.filter(library__users__user=user,
library__id=kwargs.get('library_id'), type='F', photo_tags__photo__in=photos_list).distinct() - return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__deleted=False).distinct() + return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__photo__in=photos_list).order_by('-name').distinct() + return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__deleted=False).order_by('-name').distinct() def resolve_all_color_tags(self, info, **kwargs): user = info.context.user @@ -880,6 +880,7 @@ def mutate(self, info, photo_tag_id=None, new_name=None): already_assigned_tag.save() photo_tag.verified = True photo_tag.confidence = 1 + photo_tag.deleted = False photo_tag.save() return EditFaceTag(ok=True) diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index 5b919cbe..816d3b32 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -50,6 +50,10 @@ const Container = styled('div')` } &.whiteBox { border-color: rgba(202, 202, 191, 0.5); + .FeatureLabel { + color: #000; + background-color: rgba(202, 202, 191, 0.5); + } } .FeatureEditText { color: #000 !important; @@ -95,11 +99,19 @@ const Container = styled('div')` } } ` -const BoundingBoxes = ({ boxes, className, refetch, showBoundingBox }) => { +const BoundingBoxes = ({ +boxes, +className, +refetch, +showBoundingBox, +editLableId, +setEditLableId, +tagName, +setTagName, +cancelTagEditing, +}) => { const dispatch = useDispatch() const ref = useRef(null) - const [editLableId, setEditLableId] = useState('') - const [tagName, setTagName] = useState(null) const [editFaceTag] = useMutation(EDIT_FACE_TAG) const [blockFaceTag] = useMutation(BLOCK_FACE_TAG) const [verifyPhoto] = useMutation(VERIFY_FACE_TAG) @@ -133,8 +145,7 @@ const BoundingBoxes = ({ boxes, className, refetch, showBoundingBox }) => { }, }) .then((res) => { - setEditLableId('') - setTagName(null) + cancelTagEditing() if (res.data.editFaceTag.ok) { refetch() dispatch({ @@ -144,8 +155,7 @@ const BoundingBoxes = ({ boxes, className, refetch, showBoundingBox }) => { } }) .catch((e) => { - setEditLableId('') - setTagName(null) + cancelTagEditing() }) } @@ -155,8 +165,7 @@ const BoundingBoxes = ({ boxes, className, refetch, showBoundingBox }) => { if (tagName) { onSaveLable(event,photoTagId) } else { - setEditLableId('') - setTagName(null) + cancelTagEditing() } } } @@ -198,7 +207,7 @@ const BoundingBoxes = ({ boxes, className, refetch, showBoundingBox }) => { key={index} style={{ left: left, top: top, width: width, height: height }} > - {showBoundingBox && !box.deleted && ( + {showBoundingBox && ( editLableId === box.id ? ( { ) ) } - {className === 'face' && !box.deleted && ( + {className === 'face' && (
{editLableId === box.id ? ( { /> ) : ( <> - {!box.verified && ( + {!box.verified && !box.deleted && ( { title="Reject automatic face tag" /> )} - {box.showVerifyIcon && ( + {box.showVerifyIcon && !box.deleted && ( @@ -151,15 +153,24 @@ const ZoomableImage = ({ // To handle icon show hide on single click. const showHideIcons = (event) => { - if(clickTimeOut !== null){ - clearTimeout(clickTimeOut) - }else{ - clickTimeOut = setTimeout(()=>{ - setShowFaceIcons(!showFaceIcons) + if (!editLableId){ + if(clickTimeOut !== null){ clearTimeout(clickTimeOut) - clickTimeOut=null - },300) - } + }else{ + clickTimeOut = setTimeout(()=>{ + setShowFaceIcons(!showFaceIcons) + clearTimeout(clickTimeOut) + clickTimeOut=null + },300) + } + }else{ + cancelTagEditing() + } + } + + const cancelTagEditing = (e) => { + setEditLableId('') + setTagName(null) } return ( @@ -200,6 +211,11 @@ const ZoomableImage = ({ className={key} refetch={refetch} showBoundingBox={showBoundingBox} + editLableId={editLableId} + setEditLableId={setEditLableId} + tagName={tagName} + setTagName={setTagName} + cancelTagEditing={cancelTagEditing} /> ))} From 10f2775a7c1369c8ecc1ccc88e31e82215a0aa02 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Sat, 26 Jun 2021 12:35:33 +0100 Subject: [PATCH 069/110] Postgres and Redis port number environment variables --- docker/docker-compose.example.yml | 1 + photonix/classifiers/base_model.py | 9 +++++---- photonix/classifiers/face/model.py | 11 ++++------- photonix/classifiers/object/model.py | 6 +++--- photonix/classifiers/style/model.py | 5 ++--- .../photos/management/commands/rescan_photos.py | 7 ++----- .../commands/rescan_photos_periodically.py | 6 ++---- .../management/commands/reset_redis_locks.py | 8 ++------ photonix/photos/schema.py | 12 ++++++------ photonix/photos/utils/redis.py | 10 ++++++++++ photonix/photos/utils/thumbnails.py | 1 - photonix/web/settings.py | 5 +++-- photonix/web/utils.py | 16 ++++++++-------- 13 files changed, 48 insertions(+), 49 deletions(-) create mode 100644 photonix/photos/utils/redis.py diff --git a/docker/docker-compose.example.yml b/docker/docker-compose.example.yml index 62524783..f2728366 100644 --- a/docker/docker-compose.example.yml +++ b/docker/docker-compose.example.yml @@ -27,6 +27,7 @@ services: POSTGRES_PASSWORD: password REDIS_HOST: redis ALLOWED_HOSTS: '*' + # More configuration options here: https://photonix.org/docs/configuration/ volumes: - ./data/photos:/data/photos - ./data/raw-photos-processed:/data/raw-photos-processed diff --git a/photonix/classifiers/base_model.py b/photonix/classifiers/base_model.py index 91e243d5..9487fae1 100644 --- a/photonix/classifiers/base_model.py +++ b/photonix/classifiers/base_model.py @@ -9,14 +9,16 @@ import logging import requests - -import redis from redis_lock import Lock +from photonix.photos.utils.redis import redis_connection + + graph_cache = {} logger = logging.getLogger(__name__) + class BaseModel: def __init__(self, model_dir=None): global graph_cache @@ -50,8 +52,7 @@ def ensure_downloaded(self, lock_name=None): if not lock_name: lock_name = 'classifier_{}_download'.format(self.name) - r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1')) - with Lock(r, lock_name): + with Lock(redis_connection, lock_name): try: with open(version_file) as f: if f.read().strip() == str(self.version): diff --git a/photonix/classifiers/face/model.py b/photonix/classifiers/face/model.py index 2e41d733..2a5c5d2d 100644 --- a/photonix/classifiers/face/model.py +++ b/photonix/classifiers/face/model.py @@ -9,7 +9,6 
@@ from django.utils import timezone import numpy as np from PIL import Image -import redis from redis_lock import Lock from photonix.classifiers.base_model import BaseModel @@ -17,6 +16,7 @@ from photonix.classifiers.face.mtcnn import MTCNN from photonix.classifiers.face.deepface.commons.distance import findEuclideanDistance from photonix.classifiers.face.deepface.DeepFace import build_model +from photonix.photos.utils.redis import redis_connection GRAPH_FILE = os.path.join('face', 'mtcnn_weights.npy') @@ -42,8 +42,7 @@ def __init__(self, model_dir=None, graph_file=GRAPH_FILE, library_id=None, lock_ def load_graph(self, graph_file): - r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1')) - with Lock(r, 'classifier_{}_load_graph'.format(self.name)): + with Lock(redis_connection, 'classifier_{}_load_graph'.format(self.name)): # Load MTCNN mtcnn_graph = None mtcnn_key = '{self.graph_cache_key}:mtcnn' @@ -98,8 +97,7 @@ def find_closest_face_tag_by_ann(self, source_embedding): embedding_size = 128 # FaceNet output size t = AnnoyIndex(embedding_size, 'euclidean') # Ensure ANN index, tag IDs and version files can't be updated while we are reading - r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1')) - with Lock(r, 'face_model_retrain'): + with Lock(redis_connection, 'face_model_retrain'): self.reload_retrained_model_version() t.load(str(ann_path)) with open(tag_ids_path) as f: @@ -193,8 +191,7 @@ def retrain_face_similarity_index(self, training_data=None): t.build(3) # Number of random forest trees # Aquire lock to save ANN, tag IDs and version files atomically - r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1')) - with Lock(r, 'face_model_retrain'): + with Lock(redis_connection, 'face_model_retrain'): # Save ANN index t.save(str(ann_path)) diff --git a/photonix/classifiers/object/model.py b/photonix/classifiers/object/model.py index edf15cd3..a77b99c6 100644 --- a/photonix/classifiers/object/model.py +++ b/photonix/classifiers/object/model.py @@ -5,14 +5,14 @@ from django.utils import timezone import numpy as np from PIL import Image -import redis from redis_lock import Lock import tensorflow as tf from photonix.classifiers.object.utils import label_map_util from photonix.classifiers.base_model import BaseModel +from photonix.photos.utils.redis import redis_connection + -r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1')) GRAPH_FILE = os.path.join('object', 'ssd_mobilenet_v2_oid_v4_2018_12_12_frozen_inference_graph.pb') LABEL_FILE = os.path.join('object', 'oid_v4_label_map.pbtxt') @@ -33,7 +33,7 @@ def __init__(self, model_dir=None, graph_file=GRAPH_FILE, label_file=LABEL_FILE, self.labels = self.load_labels(label_file) def load_graph(self, graph_file): - with Lock(r, 'classifier_{}_load_graph'.format(self.name)): + with Lock(redis_connection, 'classifier_{}_load_graph'.format(self.name)): if self.graph_cache_key in self.graph_cache: return self.graph_cache[self.graph_cache_key] diff --git a/photonix/classifiers/style/model.py b/photonix/classifiers/style/model.py index 7dfc5c47..c06fa906 100644 --- a/photonix/classifiers/style/model.py +++ b/photonix/classifiers/style/model.py @@ -4,11 +4,11 @@ import numpy as np -import redis from redis_lock import Lock import tensorflow as tf from photonix.classifiers.base_model import BaseModel +from photonix.photos.utils.redis import redis_connection GRAPH_FILE = os.path.join('style', 'graph.pb') @@ -34,8 +34,7 @@ def __init__(self, model_dir=None, graph_file=GRAPH_FILE, label_file=LABEL_FILE, self.labels = 
self.load_labels(label_file) def load_graph(self, graph_file): - r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1')) - with Lock(r, 'classifier_{}_load_graph'.format(self.name)): + with Lock(redis_connection, 'classifier_{}_load_graph'.format(self.name)): if self.graph_cache_key in self.graph_cache: return self.graph_cache[self.graph_cache_key] diff --git a/photonix/photos/management/commands/rescan_photos.py b/photonix/photos/management/commands/rescan_photos.py index 745cc2a3..f45cd01c 100644 --- a/photonix/photos/management/commands/rescan_photos.py +++ b/photonix/photos/management/commands/rescan_photos.py @@ -1,10 +1,8 @@ -import os - from django.conf import settings from django.core.management.base import BaseCommand -import redis from redis_lock import Lock +from photonix.photos.utils.redis import redis_connection from photonix.photos.utils.organise import rescan_photo_libraries from photonix.photos.utils.system import missing_system_dependencies @@ -25,6 +23,5 @@ def rescan_photos(self, paths): print('Rescan complete') def handle(self, *args, **options): - r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1')) - with Lock(r, 'rescan_photos'): + with Lock(redis_connection, 'rescan_photos'): self.rescan_photos(options['paths']) diff --git a/photonix/photos/management/commands/rescan_photos_periodically.py b/photonix/photos/management/commands/rescan_photos_periodically.py index eb7e35cb..d83ddb19 100644 --- a/photonix/photos/management/commands/rescan_photos_periodically.py +++ b/photonix/photos/management/commands/rescan_photos_periodically.py @@ -1,13 +1,12 @@ -import os from time import sleep from django.conf import settings from django.core.management.base import BaseCommand -import redis from redis_lock import Lock from photonix.photos.utils.organise import rescan_photo_libraries from photonix.photos.utils.system import missing_system_dependencies +from photonix.photos.utils.redis import redis_connection class Command(BaseCommand): @@ -26,10 +25,9 @@ def rescan_photos(self, paths): print('Rescan complete') def handle(self, *args, **options): - r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1')) try: while True: - with Lock(r, 'rescan_photos'): + with Lock(redis_connection, 'rescan_photos'): self.rescan_photos(options['paths']) sleep(60 * 60) # Sleep for an hour except KeyboardInterrupt: diff --git a/photonix/photos/management/commands/reset_redis_locks.py b/photonix/photos/management/commands/reset_redis_locks.py index 2ef7cfb5..970a44f0 100644 --- a/photonix/photos/management/commands/reset_redis_locks.py +++ b/photonix/photos/management/commands/reset_redis_locks.py @@ -1,15 +1,11 @@ -import os - from django.core.management.base import BaseCommand -import redis import redis_lock - -r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1')) +from photonix.photos.utils.redis import redis_connection class Command(BaseCommand): help = 'Removes all Redis locks - intended to be run on server start.' 
def handle(self, *args, **options): - redis_lock.reset_all(r) + redis_lock.reset_all(redis_connection) diff --git a/photonix/photos/schema.py b/photonix/photos/schema.py index 73bec9b0..04359dcb 100644 --- a/photonix/photos/schema.py +++ b/photonix/photos/schema.py @@ -1,19 +1,19 @@ +import os from django.conf import settings +from django.contrib.auth import get_user_model, load_backend, login import django_filters from django_filters import CharFilter from graphene_django.filter import DjangoFilterConnectionField from graphene_django.types import DjangoObjectType from graphql_jwt.decorators import login_required from graphql import GraphQLError -from django.db.models import Q -from django.contrib.auth import get_user_model +import graphene + from .models import Library, Camera, Lens, Photo, Tag, PhotoTag, LibraryPath, LibraryUser, PhotoFile, Task -from django.contrib.auth import load_backend, login from photonix.photos.utils.filter_photos import filter_photos_queryset, sort_photos_exposure from photonix.photos.utils.metadata import PhotoMetadata -import os -import graphene + User = get_user_model() @@ -314,7 +314,7 @@ def resolve_photo(self, info, **kwargs): @login_required def resolve_all_photos(self, info, **kwargs): user = info.context.user - return Photo.objects.filter(library__users__user=user) + return Photo.objects.filter(library__users__user=user, thumbnailed_version__isnull=False) @login_required def resolve_map_photos(self, info, **kwargs): diff --git a/photonix/photos/utils/redis.py b/photonix/photos/utils/redis.py new file mode 100644 index 00000000..a83c27d1 --- /dev/null +++ b/photonix/photos/utils/redis.py @@ -0,0 +1,10 @@ +import os + +import redis + + +redis_connection = redis.Redis( + host=os.environ.get('REDIS_HOST', '127.0.0.1'), + port=int(os.environ.get('REDIS_PORT', '6379')), + db=int(os.environ.get('REDIS_DB', '0')) +) diff --git a/photonix/photos/utils/thumbnails.py index 8225e2dd..c897e659 100644 --- a/photonix/photos/utils/thumbnails.py +++ b/photonix/photos/utils/thumbnails.py @@ -8,7 +8,6 @@ import numpy as np from django.conf import settings -from django.utils import timezone from photonix.photos.models import Photo, PhotoFile, Task from photonix.photos.utils.metadata import PhotoMetadata diff --git a/photonix/web/settings.py index 8397662b..d581bb45 100644 --- a/photonix/web/settings.py +++ b/photonix/web/settings.py @@ -88,6 +88,7 @@ 'NAME': os.environ.get('POSTGRES_DB', 'photonix'), 'USER': os.environ.get('POSTGRES_USER', 'postgres'), 'PASSWORD': os.environ.get('POSTGRES_PASSWORD', 'password'), + 'PORT': int(os.environ.get('POSTGRES_PORT', '5432')), } } @@ -151,8 +152,8 @@ # Width, height, crop method, JPEG quality, whether it should be generated upon upload, force accurate gamma-aware sRGB resizing (256, 256, 'cover', 50, True, True), # Square thumbnails # We use the largest dimension for both dimensions as they won't crop and some with in portrait mode - (960, 960, 'contain', 75, False, False), # 960px - (1920, 1920, 'contain', 75, False, False), # 2k + # (960, 960, 'contain', 75, False, False), # 960px + # (1920, 1920, 'contain', 75, False, False), # 2k (3840, 3840, 'contain', 75, False, False), # 4k ]
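With this commit the Redis connection details (REDIS_HOST, REDIS_PORT, REDIS_DB) live in one place, the new photonix/photos/utils/redis.py shown above, instead of each module building its own redis.Redis(host=...). A minimal usage sketch follows, assuming only that shared redis_connection object and the python-redis-lock API already used throughout these diffs; run_exclusively is an illustrative helper, not part of the commit:

    # Hedged sketch: serialize a task across all workers/containers by name,
    # the same way 'rescan_photos' and 'classifier_*_load_graph' are locked above.
    from redis_lock import Lock

    from photonix.photos.utils.redis import redis_connection

    def run_exclusively(lock_name, func, *args, **kwargs):
        # Blocks until no other process holds lock_name, then runs func.
        with Lock(redis_connection, lock_name):
            return func(*args, **kwargs)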
diff --git a/photonix/web/utils.py index fbca1f5b..5d33cbea 100644 --- a/photonix/web/utils.py +++ b/photonix/web/utils.py @@ -1,9 +1,10 @@ import os from django.core.management import utils -import redis from redis_lock import Lock +from photonix.photos.utils.redis import redis_connection + def get_secret_key(): # To avoid each installation having the same Django SECRET_KEY we generate @@ -16,17 +17,16 @@ if 'DJANGO_SECRET_KEY' in os.environ: secret_key = os.environ.get('DJANGO_SECRET_KEY') else: - r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1')) - if r.exists('django_secret_key'): - secret_key = r.get('django_secret_key').decode('utf-8') + if redis_connection.exists('django_secret_key'): + secret_key = redis_connection.get('django_secret_key').decode('utf-8') else: # Make sure only first worker generates the key and others get from Redis - with Lock(r, 'django_secret_key_generation_lock'): - if r.exists('django_secret_key'): - secret_key = r.get('django_secret_key').decode('utf-8') + with Lock(redis_connection, 'django_secret_key_generation_lock'): + if redis_connection.exists('django_secret_key'): + secret_key = redis_connection.get('django_secret_key').decode('utf-8') else: secret_key = utils.get_random_secret_key() - r.set('django_secret_key', secret_key.encode('utf-8')) + redis_connection.set('django_secret_key', secret_key.encode('utf-8')) if not secret_key: raise EnvironmentError('No secret key available') From a011b7d377a1948dd49989a38dddfffd9c0dd1e3 Mon Sep 17 00:00:00 2001 From: GyanP Date: Mon, 28 Jun 2021 15:12:04 +0530 Subject: [PATCH 070/110] Face tag editing key handling and case-insensitive tag sorting --- photonix/photos/schema.py | 6 ++-- ui/src/components/BoundingBoxes.js | 37 +++++++++++++++++----------- ui/src/components/ZoomableImage.js | 13 ++------ ui/src/containers/PhotoDetailContainer.js | 2 +- 4 files changed, 24 insertions(+), 34 deletions(-) diff --git a/photonix/photos/schema.py b/photonix/photos/schema.py index c6c4213b..21adec45 100644 --- a/photonix/photos/schema.py +++ b/photonix/photos/schema.py @@ -13,7 +13,7 @@ from .models import Library, Camera, Lens, Photo, Tag, PhotoTag, LibraryPath, LibraryUser, PhotoFile, Task from photonix.photos.utils.filter_photos import filter_photos_queryset, sort_photos_exposure from photonix.photos.utils.metadata import PhotoMetadata - +from django.db.models.functions import Lower User = get_user_model() @@ -354,8 +354,8 @@ def resolve_all_person_tags(self, info, **kwargs): photos_list = filter_photos_queryset( filters, Photo.objects.filter(library__users__user=user), kwargs.get('library_id')) - return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__photo__in=photos_list).order_by('-name').distinct() - return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__deleted=False).order_by('-name').distinct() + return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__photo__in=photos_list).order_by(Lower('name')).distinct() + return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__deleted=False).order_by(Lower('name')).distinct() def resolve_all_color_tags(self, info, **kwargs): user = info.context.user diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index 816d3b32..26aacb11 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -99,6 +99,9 @@ } } ` +const ENTER_KEY = 13 +const ESCAPE_KEY = 27 + const BoundingBoxes = ({ boxes, className, refetch, showBoundingBox, editLableId, setEditLableId, -tagName, -setTagName, -cancelTagEditing, }) => { const dispatch =
useDispatch() const ref = useRef(null) @@ -118,7 +118,7 @@ cancelTagEditing, const tagUpdated = useSelector(isTagUpdated) const onHandleBlock = (event, photoTagId) => { - event.stopPropagation(); + stopParentEventBehavior(event) blockFaceTag({ variables: { photoTagId: photoTagId, @@ -137,15 +137,15 @@ cancelTagEditing, } const onSaveLable = (event, photoTagId) => { - event.stopPropagation(); + stopParentEventBehavior(event) editFaceTag({ variables: { photoTagId: photoTagId, - newName: tagName, + newName: ref.current.value, }, }) .then((res) => { - cancelTagEditing() + setEditLableId('') if (res.data.editFaceTag.ok) { refetch() dispatch({ @@ -155,23 +155,16 @@ cancelTagEditing, } }) .catch((e) => { - cancelTagEditing() + setEditLableId('') }) } const onChangeLable = (event, photoTagId) => { - setTagName(event.target.value) - if (event.keyCode === 13) { - if (tagName) { - onSaveLable(event,photoTagId) - } else { - cancelTagEditing() - } - } + (event.keyCode === ENTER_KEY && ref.current.value && onSaveLable(event,photoTagId)) || (event.keyCode === ESCAPE_KEY && setEditLableId('')) } const setVerifyPhoto = (event, photoTagId) => { - event.stopPropagation(); + stopParentEventBehavior(event) verifyPhoto({ variables: { photoTagId: photoTagId, @@ -190,10 +183,14 @@ cancelTagEditing, }, [editLableId]) const updateEditState = (event, boxId) => { - event.stopPropagation(); + stopParentEventBehavior(event) setEditLableId(boxId) } + const stopParentEventBehavior = (event) => { + event.stopPropagation(); + } + return ( {boxes?.map((box, index) => { @@ -213,8 +210,10 @@ cancelTagEditing, type="text" name="tagName" className="FeatureEditText" - onKeyUp={(e) => onChangeLable(e, box.id)} + onKeyDown={(e) => onChangeLable(e, box.id)} ref={ref} + onMouseDown={(e) => stopParentEventBehavior(e) } + onClick={(e) => stopParentEventBehavior(e) } /> ) : (
diff --git a/ui/src/components/ZoomableImage.js b/ui/src/components/ZoomableImage.js index 23dc4c75..aecc2e34 100644 --- a/ui/src/components/ZoomableImage.js +++ b/ui/src/components/ZoomableImage.js @@ -93,7 +93,6 @@ const ZoomableImage = ({ const [loading, setLoading] = useState(true) const [displayImage, setDisplayImage] = useState(false) const [editLableId, setEditLableId] = useState('') - const [tagName, setTagName] = useState(null) let clickTimeOut = null; const prevNextPhotos = useSelector((state) => @@ -167,15 +166,10 @@ const ZoomableImage = ({ },300) } }else{ - cancelTagEditing() + setEditLableId('') } } - - const cancelTagEditing = (e) => { - setEditLableId('') - setTagName(null) - } - + return ( ))} diff --git a/ui/src/containers/PhotoDetailContainer.js b/ui/src/containers/PhotoDetailContainer.js index 2cc9fc9e..076d93be 100644 --- a/ui/src/containers/PhotoDetailContainer.js +++ b/ui/src/containers/PhotoDetailContainer.js @@ -125,7 +125,7 @@ const PhotoDetailContainer = (props) => { const handleKeyDown = (event) => { switch (event.keyCode) { case ESCAPE_KEY: - history.push('/') + event.target.name !== 'tagName' && history.push('/') break default: break From 48c8a9d806e23e555ebeae0c406aad139cf3f831 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Thu, 8 Jul 2021 18:17:08 +0100 Subject: [PATCH 071/110] Tidy-up commented code --- ui/src/components/ZoomableImage.js | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/ui/src/components/ZoomableImage.js b/ui/src/components/ZoomableImage.js index 73eb5d90..de5be349 100644 --- a/ui/src/components/ZoomableImage.js +++ b/ui/src/components/ZoomableImage.js @@ -5,7 +5,7 @@ import { TransformWrapper, TransformComponent } from 'react-zoom-pan-pinch' import { useSwipeable } from 'react-swipeable' import { useSelector } from 'react-redux' -import BoundingBoxes from './BoundingBoxes' +import BoundingBoxes from './BoundingBoxes' import Spinner from './Spinner' import { getPrevNextPhotos } from '../stores/photos/selector' @@ -78,7 +78,7 @@ const Container = styled('div')` } ` -const ZoomableImage = ({ photoId, boxes, next, prev, refetch}) => { +const ZoomableImage = ({ photoId, boxes, next, prev, refetch }) => { const [scale, setScale] = useState(1) const [zoom, setZoom] = useState(false) const [loading, setLoading] = useState(true) @@ -135,10 +135,9 @@ const ZoomableImage = ({ photoId, boxes, next, prev, refetch}) => { if (e.scale === 1 && zoom) { setZoom(false) } else if (e.scale > 1 && !zoom) { - // setZoom(true) setTimeout(() => { - setZoom(true); - }, 200); + setZoom(true) + }, 200) } } @@ -175,7 +174,11 @@ const ZoomableImage = ({ photoId, boxes, next, prev, refetch}) => { className={displayImage ? ' display' : undefined} key={index} > - + ))}
@@ -217,7 +220,7 @@ ZoomableImage.propTypes = { sizeY: PropTypes.number, verified: PropTypes.bool, deleted: PropTypes.bool, - boxColorClass: PropTypes.string, + boxColorClass: PropTypes.string, showVerifyIcon: PropTypes.bool, }) ), From 934e611eb3b36d0cb8fa4bc47684c4eed523eb54 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Thu, 8 Jul 2021 18:33:26 +0100 Subject: [PATCH 072/110] Styling changes to map thumbnails --- ui/src/components/MapView.js | 50 +++++++++++++++++------------------- ui/src/static/css/Map.css | 4 ++- 2 files changed, 26 insertions(+), 28 deletions(-) diff --git a/ui/src/components/MapView.js b/ui/src/components/MapView.js index c479c7c2..fb1372d3 100644 --- a/ui/src/components/MapView.js +++ b/ui/src/components/MapView.js @@ -1,21 +1,14 @@ -import React, {useEffect, useState} from 'react' +import React, { useEffect, useState } from 'react' import PropTypes from 'prop-types' import { useHistory } from 'react-router-dom' import { MapContainer, Marker, TileLayer } from 'react-leaflet' import MarkerClusterGroup from 'react-leaflet-markercluster' -import {useMapEvent} from "react-leaflet"; -import L from "leaflet"; +import { useMapEvent } from 'react-leaflet' +import L from 'leaflet' import '../static/css/Map.css' import 'react-leaflet-markercluster/dist/styles.min.css' // sass -const MapView = ({ - photos, - bounds, - location, - // zoom, - maxZoom, - hideAttribution, -}) => { +const MapView = ({ photos, bounds, location, maxZoom, hideAttribution }) => { let markers = [] let tileUrl = 'https://{s}.basemaps.cartocdn.com/spotify_dark/{z}/{x}/{y}{r}.png' @@ -27,17 +20,17 @@ const MapView = ({ const [latState, setLatState] = useState(30) const [lngState, setLngState] = useState(0) const [zoomState, setZoomState] = useState(2) - const [map, setMap] = useState(null); + const [map, setMap] = useState(null) const history = useHistory() // Use to check the component comes back from next page or not and setStates. useEffect(() => { - if (history.action === "POP"){ + if (history.action === 'POP') { setZoomState(parseInt(localStorage.getItem('mapZoom'))) setLatState(localStorage.getItem('lat')) setLngState(localStorage.getItem('lng')) } - }, [history]); + }, [history]) // Use to handle map events and set new position and zoom value to map. const MapEvents = () => { @@ -51,19 +44,22 @@ const MapView = ({ localStorage.setItem('lat', mapEvents.getCenter().lat) localStorage.setItem('lng', mapEvents.getCenter().lng) }, - }); - const position = [latState? latState : mapEvents.getCenter().lat, lngState? lngState : mapEvents.getCenter().lng] - const zoom = zoomState? zoomState : mapEvents.getZoom() - if(map) map.setView(position, zoom); - return null + }) + const position = [ + latState ? latState : mapEvents.getCenter().lat, + lngState ? lngState : mapEvents.getCenter().lng, + ] + const zoom = zoomState ? 
zoomState : mapEvents.getZoom() + if (map) map.setView(position, zoom) + return null } - + const getMarkerIcon = (photoThumbnail) => { return new L.Icon({ iconUrl: photoThumbnail, - iconSize: new L.Point(60, 75), - className: "leaflet-custom-icon" - }); + iconSize: new L.Point(50, 50), + className: 'leaflet-custom-icon', + }) } if (photos) { @@ -75,7 +71,7 @@ const MapView = ({ position={[photo.location[0], photo.location[1]]} eventHandlers={{ click: () => { - history.push(`/photo/${photo.id}`); + history.push(`/photo/${photo.id}`) }, }} /> @@ -88,7 +84,9 @@ const MapView = ({ boundsOptions={{ padding: [100, 100], maxZoom: maxZoom }} zoom={zoomState} center={[latState, lngState]} - whenCreated={map => {setMap(map)}} + whenCreated={(map) => { + setMap(map) + }} > {tileLayer} @@ -114,13 +112,11 @@ MapView.propTypes = { photos: PropTypes.array, bounds: PropTypes.func, location: PropTypes.array, - // zoom: PropTypes.number, maxZoom: PropTypes.number, hideAttribution: PropTypes.bool, } MapView.defaultProps = { - // zoom: 2, maxZoom: 15, } diff --git a/ui/src/static/css/Map.css b/ui/src/static/css/Map.css index a7078ed7..c1b5907b 100644 --- a/ui/src/static/css/Map.css +++ b/ui/src/static/css/Map.css @@ -50,6 +50,8 @@ display: none; } .leaflet-custom-icon { - border: 3px solid rgba(255, 255, 255, 0.9); + border: 1px solid rgba(255, 255, 255, 0.9); border-radius: 50%; + box-shadow: 0 5px 12px rgba(0, 0, 0, 0.5); + background: rgba(255, 255, 255, 0.25); } From 1f60ce258eed0c7c388c801ab73f671a1aadefd4 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Thu, 8 Jul 2021 19:12:44 +0100 Subject: [PATCH 073/110] Hiding label of deleted face bounding box --- ui/src/components/BoundingBoxes.js | 56 +++++++++++++++++------------- ui/src/components/PhotoDetail.js | 8 ++--- 2 files changed, 35 insertions(+), 29 deletions(-) diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index 26aacb11..a2ea1356 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -102,13 +102,13 @@ const Container = styled('div')` const ENTER_KEY = 13 const ESCAPE_KEY = 27 -const BoundingBoxes = ({ -boxes, -className, -refetch, -showBoundingBox, -editLableId, -setEditLableId, +const BoundingBoxes = ({ + boxes, + className, + refetch, + showBoundingBox, + editLableId, + setEditLableId, }) => { const dispatch = useDispatch() const ref = useRef(null) @@ -159,8 +159,11 @@ setEditLableId, }) } - const onChangeLable = (event, photoTagId) => { - (event.keyCode === ENTER_KEY && ref.current.value && onSaveLable(event,photoTagId)) || (event.keyCode === ESCAPE_KEY && setEditLableId('')) + const onChangeLable = (event, photoTagId) => { + ;(event.keyCode === ENTER_KEY && + ref.current.value && + onSaveLable(event, photoTagId)) || + (event.keyCode === ESCAPE_KEY && setEditLableId('')) } const setVerifyPhoto = (event, photoTagId) => { @@ -188,7 +191,7 @@ setEditLableId, } const stopParentEventBehavior = (event) => { - event.stopPropagation(); + event.stopPropagation() } return ( @@ -198,30 +201,33 @@ setEditLableId, let top = (box.positionY - box.sizeY / 2) * 100 + '%' let width = box.sizeX * 100 + '%' let height = box.sizeY * 100 + '%' + console.log(box) return (
- {showBoundingBox && ( - editLableId === box.id ? ( - onChangeLable(e, box.id)} - ref={ref} - onMouseDown={(e) => stopParentEventBehavior(e) } - onClick={(e) => stopParentEventBehavior(e) } - /> - ) : ( + {showBoundingBox && + (editLableId === box.id ? ( + onChangeLable(e, box.id)} + ref={ref} + onMouseDown={(e) => stopParentEventBehavior(e)} + onClick={(e) => stopParentEventBehavior(e)} + /> + ) : ( + !box.deleted && (
{box.name}
) - ) - } + ))} {className === 'face' && (
{editLableId === box.id ? ( diff --git a/ui/src/components/PhotoDetail.js index 1c4ed4ff..acfef8f2 100644 --- a/ui/src/components/PhotoDetail.js +++ b/ui/src/components/PhotoDetail.js @@ -113,7 +113,7 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { getPrevNextPhotos(state, photoId) ) const [numHistoryPushes, setNumHistoryPushes] = useState(0) - + // TODO: Bring this back so it doesn't get triggered by someone adding a tag with 'i' in it // useEffect(() => { // const handleKeyDown = (event) => { @@ -170,7 +170,7 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { }, [photoId, prevNextPhotos, prevPhoto, nextPhoto]) const setBoxColorClass = (tag) => { - return tag.deleted ? 'whiteBox' : tag.verified ? 'greenBox' : 'yellowBox'; + return tag.deleted ? 'whiteBox' : tag.verified ? 'greenBox' : 'yellowBox' } let boxes = { @@ -193,8 +193,8 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { sizeY: tag.sizeY, verified: tag.verified, deleted: tag.deleted, - boxColorClass: setBoxColorClass(tag), - showVerifyIcon: tag.showVerifyIcon, + boxColorClass: setBoxColorClass(tag), + showVerifyIcon: tag.showVerifyIcon, } }), } From d19c1def5461869ba21b953b4104afd21585dea8 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Sat, 10 Jul 2021 11:09:19 +0100 Subject: [PATCH 074/110] Initial support for HEIF/HEIC --- docker/Dockerfile.dev | 2 ++ docker/Dockerfile.prd | 2 ++ .../migrations/0010_auto_20210710_0959.py | 23 ++++++++++++ photonix/photos/models.py | 4 +-- photonix/photos/utils/db.py | 11 +++++- photonix/photos/utils/raw.py | 35 +++++++++++++++++-- 6 files changed, 71 insertions(+), 6 deletions(-) create mode 100644 photonix/photos/migrations/0010_auto_20210710_0959.py diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev index 7d362575..c9d4d89e 100644 --- a/docker/Dockerfile.dev +++ b/docker/Dockerfile.dev @@ -7,6 +7,7 @@ RUN apt-get update && \ cron \ curl \ dcraw \ + file \ git \ gnupg \ libatlas-base-dev \ @@ -18,6 +19,7 @@ RUN apt-get update && \ libgl1 \ libglib2.0-dev \ libhdf5-dev \ + libheif-examples \ libimage-exiftool-perl \ libjpeg-dev \ liblapack-dev \ diff --git a/docker/Dockerfile.prd b/docker/Dockerfile.prd index 55d4574c..33686966 100644 --- a/docker/Dockerfile.prd +++ b/docker/Dockerfile.prd @@ -98,11 +98,13 @@ RUN apt-get update && \ apt-get install -y --no-install-recommends \ cron \ dcraw \ + file \ libatlas3-base \ libfreetype6 \ libfreetype6-dev \ libgl1 \ libglib2.0-dev \ + libheif-examples \ libimage-exiftool-perl \ libpq-dev \ libtiff5-dev \ diff --git a/photonix/photos/migrations/0010_auto_20210710_0959.py b/photonix/photos/migrations/0010_auto_20210710_0959.py new file mode 100644 index 00000000..feb380e5 --- /dev/null +++ b/photonix/photos/migrations/0010_auto_20210710_0959.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.3 on 2021-07-10 09:59 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ ('photos', '0009_auto_20210617_2218'), ] + + operations = [ + migrations.AlterField( + model_name='photofile', + name='raw_external_params', + field=models.CharField(blank=True, max_length=32, null=True), + ), + migrations.AlterField( + model_name='photofile', + name='raw_external_version', + field=models.CharField(blank=True, max_length=32, null=True), + ), + ] diff --git a/photonix/photos/models.py index c8ebb88e..a66e68f9 100644 --- a/photonix/photos/models.py +++
b/photonix/photos/models.py @@ -203,8 +203,8 @@ class PhotoFile(UUIDModel, VersionedModel): thumbnailed_version = models.PositiveIntegerField(default=0) # Version from photos.utils.thumbnails.THUMBNAILER_VERSION at time of generating the required thumbnails declared in settings.THUMBNAIL_SIZES raw_processed = models.BooleanField(default=False) raw_version = models.PositiveIntegerField(null=True) - raw_external_params = models.CharField(max_length=16, blank=True, null=True) - raw_external_version = models.CharField(max_length=16, blank=True, null=True) + raw_external_params = models.CharField(max_length=32, blank=True, null=True) + raw_external_version = models.CharField(max_length=32, blank=True, null=True) def __str__(self): return str(self.path) diff --git a/photonix/photos/utils/db.py index 9c9182e0..0fe984c0 100644 --- a/photonix/photos/utils/db.py +++ b/photonix/photos/utils/db.py @@ -148,12 +148,21 @@ def record_photo(path, library, inotify_event_type=None): width = height height = old_width + mimetype = mimetypes.guess_type(path)[0] + # HEIF-based images don't get guessed by mimetypes lib so we use the extension if blank + if not mimetype: + ext = os.path.splitext(path)[1].lower() + if ext == '.heic': + mimetype = 'image/heic' + if ext == '.heics': + mimetype = 'image/heic-sequence' + # Save PhotoFile photo_file.photo = photo photo_file.path = path photo_file.width = width photo_file.height = height - photo_file.mimetype = mimetypes.guess_type(path)[0] + photo_file.mimetype = mimetype photo_file.file_modified_at = file_modified_at photo_file.bytes = os.stat(path).st_size photo_file.preferred = False # TODO
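The record_photo() change above is the detection half of the new HEIF support: Python's mimetypes module does not know the HEIF family, so when guess_type() returns nothing the file extension is mapped by hand. A standalone sketch of the same fallback, assuming only the standard library (the function name is illustrative, not from the patch):

    # Hedged sketch of the extension fallback added to record_photo() above.
    import mimetypes
    import os

    def guess_mimetype_with_heif_fallback(path):
        mimetype = mimetypes.guess_type(path)[0]
        # Older mimetypes databases don't recognise HEIF/HEIC, so map the
        # extension by hand when the guess comes back empty.
        if not mimetype:
            ext = os.path.splitext(path)[1].lower()
            if ext == '.heic':
                mimetype = 'image/heic'
            elif ext == '.heics':
                mimetype = 'image/heic-sequence'
        return mimetype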
diff --git a/photonix/photos/utils/raw.py index a46e9c61..8ca412a1 100644 --- a/photonix/photos/utils/raw.py +++ b/photonix/photos/utils/raw.py @@ -152,6 +152,26 @@ def __dcraw_version(): return +def __heif_convert_version(): + output = subprocess.Popen(['dpkg', '-s', 'libheif-examples'], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0].decode('utf-8') + for line in output.split('\n'): + if 'Version: ' in line: + try: + return re.search(r'([0-9]+.[0-9]+.[0-9]+)', line).group(1) + except AttributeError: + return + + +def __exiftool_version(): + output = subprocess.Popen(['dpkg', '-s', 'libimage-exiftool-perl'], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0].decode('utf-8') + for line in output.split('\n'): + if 'Version: ' in line: + try: + return re.search(r'([0-9]+.[0-9]+.[0-9]+)', line).group(1) + except AttributeError: + return + + def generate_jpeg(path): basename = os.path.basename(path) temp_dir = tempfile.mkdtemp() @@ -160,6 +180,7 @@ valid_image = False process_params = None + external_version = None # Handle Canon's CR3 format since their thumbnails are proprietary.
mimetype = get_mimetype(temp_input_path) @@ -180,16 +201,24 @@ def generate_jpeg(path): temp_output_path = exiftool_output['output'] else: temp_output_path = None + process_params = 'exiftool -b -JpgFromRaw' + external_version = __exiftool_version() + elif mimetype == 'image/heif': + temp_output_path = Path(temp_dir) / 'out.jpg' + subprocess.run(['heif-convert', '-q', '90', temp_input_path, temp_output_path]) + process_params = 'heif-convert -q 90' + external_version = __heif_convert_version() else: - # First try to extract the JPEG that might be inside the raw file + # Try to extract the JPEG that might be inside the raw file subprocess.run(['dcraw', '-e', temp_input_path]) temp_output_path = __get_generated_image(temp_dir, basename) + process_params = 'dcraw -e' + external_version = __dcraw_version() # Check the JPEGs dimensions are close enough to the raw's dimensions if temp_output_path: if __has_acceptable_dimensions(temp_input_path, temp_output_path): valid_image = True - process_params = 'dcraw -e' else: os.remove(temp_output_path) @@ -238,5 +267,5 @@ def generate_jpeg(path): shutil.rmtree(temp_dir) if valid_image: - return (final_path, RAW_PROCESS_VERSION, process_params, __dcraw_version()) + return (final_path, RAW_PROCESS_VERSION, process_params, external_version) return (None, RAW_PROCESS_VERSION, None, None) From d34de0ebdbfa54a898c572fa57ae332d3c808c51 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Sat, 10 Jul 2021 12:56:15 +0100 Subject: [PATCH 075/110] Improved robustness --- photonix/photos/utils/raw.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/photonix/photos/utils/raw.py index 8ca412a1..efa38e97 100644 --- a/photonix/photos/utils/raw.py +++ b/photonix/photos/utils/raw.py @@ -114,7 +114,7 @@ def __has_acceptable_dimensions(original_image_path, new_image_path, accept_empt return False # Embedded image can't be the full resolution - if new_image_dimensions[0] < 512 or new_image_dimensions[1] < 512: + if not new_image_dimensions[0] or not new_image_dimensions[1] or new_image_dimensions[0] < 512 or new_image_dimensions[1] < 512: return False # Embedded image is exactly the same dimensions @@ -203,7 +203,7 @@ def generate_jpeg(path): temp_output_path = None process_params = 'exiftool -b -JpgFromRaw' external_version = __exiftool_version() - elif mimetype == 'image/heif': + elif mimetype in ['image/heif', 'image/heic']: temp_output_path = Path(temp_dir) / 'out.jpg' subprocess.run(['heif-convert', '-q', '90', temp_input_path, temp_output_path]) process_params = 'heif-convert -q 90' From 478374899b82c84ce65a28cb7a1ae581889eb38b Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Sun, 11 Jul 2021 21:57:18 +0100 Subject: [PATCH 076/110] Logging refresh, improvements to raw processing --- docker/docker-compose.dev.yml | 1 + photonix/accounts/schema.py | 36 +++++----------- photonix/classifiers/face/model.py | 2 - .../classification_color_processor.py | 4 +- .../classification_event_processor.py | 4 +- .../commands/classification_face_processor.py | 2 + .../classification_location_processor.py | 4 +- .../classification_object_processor.py | 4 +- .../commands/classification_scheduler.py | 3 +- .../classification_style_processor.py | 4 +- .../management/commands/delete_all_photos.py | 3 +- .../management/commands/housekeeping.py | 5 ++- .../management/commands/import_demo_photos.py | 3 +- .../management/commands/import_photos.py | 3 +- .../management/commands/raw_processor.py | 8 ++-- .../management/commands/raw_scheduler.py | 5
++- .../management/commands/rescan_photos.py | 5 ++- .../commands/rescan_photos_periodically.py | 5 ++- .../commands/retrain_face_similarity_index.py | 9 ++-- .../commands/thumbnail_processor.py | 8 ++-- .../management/commands/watch_photos.py | 24 +++++------ photonix/photos/models.py | 6 ++- photonix/photos/utils/classification.py | 10 +++-- photonix/photos/utils/db.py | 37 ++++++++++------ photonix/photos/utils/metadata.py | 13 ++++-- photonix/photos/utils/organise.py | 1 + photonix/photos/utils/raw.py | 42 ++++++++++++++++--- photonix/photos/utils/thumbnails.py | 2 +- photonix/web/settings.py | 40 ++++++++++++++++++ photonix/web/utils.py | 4 ++ requirements.txt | 1 + system/nginx_dev.conf | 10 +---- system/nginx_prd.conf | 11 +---- 33 files changed, 207 insertions(+), 112 deletions(-) diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml index 17dc1a11..46102425 100644 --- a/docker/docker-compose.dev.yml +++ b/docker/docker-compose.dev.yml @@ -39,6 +39,7 @@ services: POSTGRES_PASSWORD: password REDIS_HOST: redis ALLOWED_HOSTS: '*' + LOG_LEVEL: DEBUG volumes: - ../photonix:/srv/photonix - ../system:/srv/system diff --git a/photonix/accounts/schema.py b/photonix/accounts/schema.py index a57a19e0..9d6a822a 100644 --- a/photonix/accounts/schema.py +++ b/photonix/accounts/schema.py @@ -3,6 +3,7 @@ from django.contrib.auth import get_user_model, authenticate, update_session_auth_hash import graphene from graphene_django.types import DjangoObjectType +from graphql import GraphQLError from graphql_jwt.shortcuts import create_refresh_token, get_token import graphql_jwt from photonix.photos.models import Library, LibraryPath, LibraryUser @@ -12,18 +13,12 @@ class UserType(DjangoObjectType): - """Docstring for UserType.""" - class Meta: model = User class CreateUser(graphene.Mutation): - """Docstring for CreateUser.""" - class Arguments: - """Docstring for Arguments.""" - username = graphene.String(required=True) password = graphene.String(required=True) password1 = graphene.String(required=True) @@ -34,13 +29,12 @@ class Arguments: @staticmethod def mutate(self, info, username, password, password1): - """Mutate method.""" if User.objects.filter(username=username).exists(): - raise Exception("Username already exists!") + raise GraphQLError('Username already exists!') elif len(password) < 8 and len(password1) < 8: - raise Exception("Password must be at least 8 characters long!") + raise GraphQLError('Password must be at least 8 characters long!') elif password != password1: - raise Exception("Password fields do not match!") + raise GraphQLError('Password fields do not match!') else: user = User(username=username) user.set_password(password1) @@ -61,11 +55,11 @@ class Environment(graphene.ObjectType): class AfterSignup(graphene.ObjectType): - """Pass token for login, after signup.""" - + '''Pass token for login, after signup.''' token = graphene.String() refresh_token = graphene.String() + class Query(graphene.ObjectType): profile = graphene.Field(UserType) environment = graphene.Field(Environment) @@ -74,7 +68,7 @@ class Query(graphene.ObjectType): def resolve_profile(self, info): user = info.context.user if user.is_anonymous: - raise Exception('Not logged in') + raise GraphQLError('Not logged in') return user def resolve_environment(self, info): @@ -82,13 +76,12 @@ def resolve_environment(self, info): if user and user.has_config_persional_info and \ user.has_created_library and user.has_configured_importing and \ user.has_configured_image_analysis: - # raise 
Exception(info.context.user.is_anonymous) return { 'demo': os.environ.get('DEMO', False), 'first_run': False, } else: - if not user: + if not user or not user.is_authenticated: return { 'demo': os.environ.get('DEMO', False), 'first_run': True, 'form': 'has_config_persional_info'} @@ -111,19 +104,15 @@ def resolve_environment(self, info): } def resolve_after_signup(self, info): - """To login user from frontend after finish sigunp process.""" + '''To login user from frontend after finishing signup process.''' user = info.context.user - if user.has_configured_image_analysis: + if user.is_authenticated and user.has_configured_image_analysis: return {'token': get_token(user), 'refresh_token': create_refresh_token(user)} return {'token': None, 'refresh_token': None} class ChangePassword(graphene.Mutation): - """docstring for ChangePassword.""" - class Arguments: - """docstring for Arguments.""" - old_password = graphene.String(required=True) new_password = graphene.String(required=True) @@ -131,9 +120,8 @@ class Arguments: @staticmethod def mutate(self, info, old_password, new_password): - """Mutate method for change password.""" if os.environ.get('DEMO', False) and os.environ.get('ENV') != 'test': - raise Exception("Password cannot be changed in demo mode!") + raise GraphQLError('Password cannot be changed in demo mode!') if authenticate(username=info.context.user.username, password=old_password): info.context.user.set_password(new_password) info.context.user.save() @@ -143,8 +131,6 @@ def mutate(self, info, old_password, new_password): class Mutation(graphene.ObjectType): - """To create objects for all mutaions.""" - token_auth = graphql_jwt.ObtainJSONWebToken.Field() verify_token = graphql_jwt.Verify.Field() refresh_token = graphql_jwt.Refresh.Field() diff --git a/photonix/classifiers/face/model.py index 2a5c5d2d..816bc02d 100644 --- a/photonix/classifiers/face/model.py +++ b/photonix/classifiers/face/model.py @@ -291,8 +291,6 @@ def run_on_photo(photo_id): photo.classifier_color_version = getattr(model, 'version', 0) photo.save() - print('Finished') - return photo, results diff --git a/photonix/photos/management/commands/classification_color_processor.py index bebd5515..b5d23308 100644 --- a/photonix/photos/management/commands/classification_color_processor.py +++ b/photonix/photos/management/commands/classification_color_processor.py @@ -1,10 +1,12 @@ from django.core.management.base import BaseCommand + # Pre-load the model graphs so it doesn't have to be done for each job from photonix.classifiers.color import ColorModel, run_on_photo from photonix.photos.utils.classification import ThreadedQueueProcessor +from photonix.web.utils import logger -print('Loading color model') +logger.debug('Loading color model') model = ColorModel()
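Each classification command in this commit now imports a shared logger from photonix.web.utils instead of calling print(). The 40 lines the commit adds to photonix/web/settings.py are not shown in these hunks, so the following is only a hedged sketch of what an env-driven setup implied by the new LOG_LEVEL variable in docker-compose.dev.yml could look like, not the committed configuration:

    # Illustrative sketch only: a module-level logger whose verbosity follows
    # the LOG_LEVEL environment variable (defaulting to INFO).
    import logging
    import os

    logging.basicConfig(
        level=os.environ.get('LOG_LEVEL', 'INFO'),
        format='%(asctime)s %(levelname)s %(name)s %(message)s',
    )
    logger = logging.getLogger('photonix')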
diff --git a/photonix/photos/management/commands/classification_event_processor.py index 1174369c..b9493868 100644 --- a/photonix/photos/management/commands/classification_event_processor.py +++ b/photonix/photos/management/commands/classification_event_processor.py @@ -1,10 +1,12 @@ from django.core.management.base import BaseCommand + # Pre-load the model graphs so it doesn't have to be done for each job from photonix.classifiers.event import EventModel, run_on_photo from photonix.photos.utils.classification import ThreadedQueueProcessor +from photonix.web.utils import logger
-print('Loading event model') +logger.debug('Loading event model') model = EventModel() diff --git a/photonix/photos/management/commands/classification_face_processor.py index 0386c66a..7f1a6dbb 100644 --- a/photonix/photos/management/commands/classification_face_processor.py +++ b/photonix/photos/management/commands/classification_face_processor.py @@ -1,7 +1,9 @@ from django.core.management.base import BaseCommand + # Pre-load the model graphs so it doesn't have to be done for each job from photonix.classifiers.face import run_on_photo from photonix.photos.utils.classification import ThreadedQueueProcessor +from photonix.web.utils import logger model = None diff --git a/photonix/photos/management/commands/classification_location_processor.py index feae17f4..495cb3df 100644 --- a/photonix/photos/management/commands/classification_location_processor.py +++ b/photonix/photos/management/commands/classification_location_processor.py @@ -1,10 +1,12 @@ from django.core.management.base import BaseCommand + # Pre-load the model graphs so it doesn't have to be done for each job from photonix.classifiers.location import LocationModel, run_on_photo from photonix.photos.utils.classification import ThreadedQueueProcessor +from photonix.web.utils import logger -print('Loading location model') +logger.debug('Loading location model') model = LocationModel() diff --git a/photonix/photos/management/commands/classification_object_processor.py index fa78a804..b89e2eec 100644 --- a/photonix/photos/management/commands/classification_object_processor.py +++ b/photonix/photos/management/commands/classification_object_processor.py @@ -1,10 +1,12 @@ from django.core.management.base import
BaseCommand + # Pre-load the model graphs so it doesn't have to be done for each job from photonix.classifiers.style import StyleModel, run_on_photo from photonix.photos.utils.classification import ThreadedQueueProcessor +from photonix.web.utils import logger -print('Loading style classification model') +logger.debug('Loading style classification model') model = StyleModel() diff --git a/photonix/photos/management/commands/delete_all_photos.py b/photonix/photos/management/commands/delete_all_photos.py index b845a74b..1a4da215 100644 --- a/photonix/photos/management/commands/delete_all_photos.py +++ b/photonix/photos/management/commands/delete_all_photos.py @@ -5,6 +5,7 @@ from django.core.management.base import BaseCommand from photonix.photos.models import Camera, Lens, Photo, PhotoFile, Tag +from photonix.web.utils import logger class Command(BaseCommand): @@ -19,7 +20,7 @@ def clear_dir(self, path): elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: - print(e) + logger.error(e) def delete_all_photos(self): Camera.objects.all().delete() diff --git a/photonix/photos/management/commands/housekeeping.py b/photonix/photos/management/commands/housekeeping.py index 0a3be620..5ec98606 100644 --- a/photonix/photos/management/commands/housekeeping.py +++ b/photonix/photos/management/commands/housekeeping.py @@ -8,6 +8,7 @@ from photonix.photos.models import Photo, Task from photonix.photos.utils.thumbnails import THUMBNAILER_VERSION +from photonix.web.utils import logger class Command(BaseCommand): @@ -19,7 +20,7 @@ def housekeeping(self): for directory in os.listdir(settings.THUMBNAIL_ROOT): if directory not in ['photofile']: path = Path(settings.THUMBNAIL_ROOT) / directory - print(f'Removing old cache directory {path}') + logger.info(f'Removing old cache directory {path}') rmtree(path) except FileNotFoundError: # In case thumbnail dir hasn't been created yet pass @@ -27,7 +28,7 @@ def housekeeping(self): # Regenerate any outdated thumbnails photos = Photo.objects.filter(thumbnailed_version__lt=THUMBNAILER_VERSION) if photos.count(): - print(f'Rescheduling {photos.count()} photos to have their thumbnails regenerated') + logger.info(f'Rescheduling {photos.count()} photos to have their thumbnails regenerated') for photo in photos: Task( type='generate_thumbnails', subject_id=photo.id, diff --git a/photonix/photos/management/commands/import_demo_photos.py b/photonix/photos/management/commands/import_demo_photos.py index 18981d0d..82326026 100644 --- a/photonix/photos/management/commands/import_demo_photos.py +++ b/photonix/photos/management/commands/import_demo_photos.py @@ -8,6 +8,7 @@ from photonix.photos.models import Library, LibraryPath, LibraryUser from photonix.photos.utils.db import record_photo from photonix.photos.utils.fs import determine_destination, download_file +from photonix.web.utils import logger User = get_user_model() @@ -86,7 +87,7 @@ def import_photos(self): dest_path = str(Path(dest_dir) / fn) if not os.path.exists(dest_path): - print('Fetching {} -> {}'.format(url, dest_path)) + logger.info('Fetching {} -> {}'.format(url, dest_path)) download_file(url, dest_path) record_photo(dest_path, library) diff --git a/photonix/photos/management/commands/import_photos.py b/photonix/photos/management/commands/import_photos.py index 0c7e5111..71520fbd 100644 --- a/photonix/photos/management/commands/import_photos.py +++ b/photonix/photos/management/commands/import_photos.py @@ -2,6 +2,7 @@ from photonix.photos.utils.organise import import_photos_from_dir from 
photonix.photos.utils.system import missing_system_dependencies +from photonix.web.utils import logger class Command(BaseCommand): @@ -13,7 +14,7 @@ def add_arguments(self, parser): def import_photos(self, paths): missing = missing_system_dependencies(['exiftool', ]) if missing: - print('Missing dependencies: {}'.format(missing)) + logger.critical('Missing dependencies: {}'.format(missing)) exit(1) for path in paths: diff --git a/photonix/photos/management/commands/raw_processor.py b/photonix/photos/management/commands/raw_processor.py index 029bf696..15e28275 100644 --- a/photonix/photos/management/commands/raw_processor.py +++ b/photonix/photos/management/commands/raw_processor.py @@ -8,6 +8,8 @@ from photonix.photos.models import Task from photonix.photos.utils.raw import process_raw_task from photonix.photos.utils.tasks import requeue_stuck_tasks +from photonix.web.utils import logger + q = queue.Queue() @@ -31,7 +33,7 @@ def run_processors(self): num_workers = cpu_count() threads = [] - print('Starting {} raw processor workers\n'.format(num_workers)) + logger.info(f'Starting {num_workers} raw processor workers') for i in range(num_workers): t = threading.Thread(target=worker) @@ -44,12 +46,12 @@ def run_processors(self): num_remaining = Task.objects.filter(type='process_raw', status='P').count() if num_remaining: - print('{} tasks remaining for raw processing'.format(num_remaining)) + logger.info(f'{num_remaining} tasks remaining for raw processing') # Load 'Pending' tasks onto worker threads for task in Task.objects.filter(type='process_raw', status='P')[:64]: q.put(task) - print('Finished raw processing batch') + logger.info('Finished raw processing batch') # Wait until all threads have finished q.join() diff --git a/photonix/photos/management/commands/raw_scheduler.py b/photonix/photos/management/commands/raw_scheduler.py index d8b0db8e..9c43601f 100644 --- a/photonix/photos/management/commands/raw_scheduler.py +++ b/photonix/photos/management/commands/raw_scheduler.py @@ -4,6 +4,7 @@ from photonix.photos.models import Task from photonix.photos.utils.raw import ensure_raw_processing_tasks +from photonix.web.utils import logger class Command(BaseCommand): @@ -13,9 +14,9 @@ def run_scheduler(self): while True: num_remaining = Task.objects.filter(type='ensure_raw_processed', status='P').count() if num_remaining: - print('{} tasks remaining for raw process scheduling'.format(num_remaining)) + logger.info(f'{num_remaining} tasks remaining for raw process scheduling') ensure_raw_processing_tasks() - print('Finished raw process scheduling') + logger.info('Finished raw process scheduling') sleep(1) def handle(self, *args, **options): diff --git a/photonix/photos/management/commands/rescan_photos.py b/photonix/photos/management/commands/rescan_photos.py index f45cd01c..7a125994 100644 --- a/photonix/photos/management/commands/rescan_photos.py +++ b/photonix/photos/management/commands/rescan_photos.py @@ -5,6 +5,7 @@ from photonix.photos.utils.redis import redis_connection from photonix.photos.utils.organise import rescan_photo_libraries from photonix.photos.utils.system import missing_system_dependencies +from photonix.web.utils import logger class Command(BaseCommand): @@ -16,11 +17,11 @@ def add_arguments(self, parser): def rescan_photos(self, paths): missing = missing_system_dependencies(['exiftool', ]) if missing: - print('Missing dependencies: {}'.format(missing)) + logger.critical(f'Missing dependencies: {missing}') exit(1) rescan_photo_libraries(paths) - print('Rescan complete') + 
logger.info('Rescan complete') def handle(self, *args, **options): with Lock(redis_connection, 'rescan_photos'): diff --git a/photonix/photos/management/commands/rescan_photos_periodically.py b/photonix/photos/management/commands/rescan_photos_periodically.py index d83ddb19..0e19478a 100644 --- a/photonix/photos/management/commands/rescan_photos_periodically.py +++ b/photonix/photos/management/commands/rescan_photos_periodically.py @@ -7,6 +7,7 @@ from photonix.photos.utils.organise import rescan_photo_libraries from photonix.photos.utils.system import missing_system_dependencies from photonix.photos.utils.redis import redis_connection +from photonix.web.utils import logger class Command(BaseCommand): @@ -18,11 +19,11 @@ def add_arguments(self, parser): def rescan_photos(self, paths): missing = missing_system_dependencies(['exiftool', ]) if missing: - print('Missing dependencies: {}'.format(missing)) + logger.critical(f'Missing dependencies: {missing}') exit(1) rescan_photo_libraries(paths) - print('Rescan complete') + logger.info('Rescan complete') def handle(self, *args, **options): try: diff --git a/photonix/photos/management/commands/retrain_face_similarity_index.py b/photonix/photos/management/commands/retrain_face_similarity_index.py index 6ac72870..8b5a9914 100644 --- a/photonix/photos/management/commands/retrain_face_similarity_index.py +++ b/photonix/photos/management/commands/retrain_face_similarity_index.py @@ -10,6 +10,7 @@ from photonix.photos.models import Library, PhotoTag from photonix.classifiers.face.model import FaceModel +from photonix.web.utils import logger class Command(BaseCommand): @@ -26,18 +27,18 @@ def retrain_face_similarity_index(self): version_date = datetime.strptime(contents, '%Y%m%d%H%M%S').replace(tzinfo=timezone.utc) start = time() - print(f'Updating ANN index for Library {library.id}') + logger.info(f'Updating ANN index for Library {library.id}') if PhotoTag.objects.filter(tag__type='F').count() == 0: - print(' No Face PhotoTags in Library so no point in creating face ANN index yet') + logger.info(' No Face PhotoTags in Library so no point in creating face ANN index yet') return if version_date and PhotoTag.objects.filter(updated_at__gt=version_date, tag__type='F').count() == 0: - print(' No new Face PhotoTags in Library so no point in updating face ANN index') + logger.info(' No new Face PhotoTags in Library so no point in updating face ANN index') return FaceModel(library_id=library.id).retrain_face_similarity_index() - print(f' Completed in {(time() - start):.3f}s') + logger.info(f' Completed in {(time() - start):.3f}s') def handle(self, *args, **options): self.retrain_face_similarity_index() diff --git a/photonix/photos/management/commands/thumbnail_processor.py b/photonix/photos/management/commands/thumbnail_processor.py index f2b0e748..e20be367 100644 --- a/photonix/photos/management/commands/thumbnail_processor.py +++ b/photonix/photos/management/commands/thumbnail_processor.py @@ -8,6 +8,8 @@ from photonix.photos.models import Task from photonix.photos.utils.tasks import requeue_stuck_tasks from photonix.photos.utils.thumbnails import generate_thumbnails_for_photo +from photonix.web.utils import logger + q = queue.Queue() @@ -31,7 +33,7 @@ def run_processors(self): num_workers = max(int(cpu_count() / 4), 1) threads = [] - print('Starting {} thumbnail processor workers\n'.format(num_workers)) + logger.info('Starting {} thumbnail processor workers'.format(num_workers)) for i in range(num_workers): t = threading.Thread(target=worker) @@ -44,12 
+46,12 @@ def run_processors(self): num_remaining = Task.objects.filter(type='generate_thumbnails', status='P').count() if num_remaining: - print('{} tasks remaining for thumbnail processing'.format(num_remaining)) + logger.info('{} tasks remaining for thumbnail processing'.format(num_remaining)) # Load 'Pending' tasks onto worker threads for task in Task.objects.filter(type='generate_thumbnails', status='P')[:64]: q.put(task) - print('Finished thumbnail processing batch') + logger.info('Finished thumbnail processing batch') # Wait until all threads have finished q.join() diff --git a/photonix/photos/management/commands/watch_photos.py b/photonix/photos/management/commands/watch_photos.py index be583877..fea2e43b 100644 --- a/photonix/photos/management/commands/watch_photos.py +++ b/photonix/photos/management/commands/watch_photos.py @@ -1,6 +1,4 @@ import asyncio -import imghdr -import subprocess from pathlib import Path from time import sleep @@ -12,6 +10,7 @@ from photonix.photos.utils.db import record_photo, move_or_rename_photo, delete_child_dir_all_photos from photonix.photos.models import LibraryPath +from photonix.web.utils import logger class Command(BaseCommand): @@ -67,13 +66,13 @@ async def check_libraries(): for path, id in current_libraries.items(): if path not in watching_libraries: for directory in get_directories_recursive(Path(path)): - print('Watching new path:', directory) + logger.info(f'Watching new path: {directory}') watch = inotify.add_watch(directory, Mask.MODIFY | Mask.CREATE | Mask.DELETE | Mask.CLOSE | Mask.MOVE) watching_libraries[path] = (id, watch) for path, (id, watch) in watching_libraries.items(): if path not in current_libraries: - print('Removing old path:', path) + logger.info(f'Removing old path: {path}') inotify.rm_watch(watch) await asyncio.sleep(4) @@ -88,16 +87,16 @@ async def handle_inotify_events(): photo_moved_from_cookie = moved_from_attr_dict.get('moved_from_cookie') moved_from_attr_dict = {} if event.mask.name == 'MOVED_TO' and photo_moved_from_cookie == event.cookie: - print(f'Moving or renaming the photo "{str(event.path)}" from library "{library_id}"') + logger.info(f'Moving or renaming the photo "{str(event.path)}" from library "{library_id}"') await move_or_rename_photo_async(photo_moved_from_path, event.path, library_id) else: - print(f'Removing photo "{str(photo_moved_from_path)}" from library "{library_id}"') + logger.info(f'Removing photo "{str(photo_moved_from_path)}" from library "{library_id}"') await record_photo_async(photo_moved_from_path, library_id, 'MOVED_FROM') elif Mask.CREATE in event.mask and event.path is not None and event.path.is_dir(): current_libraries = await get_libraries() for path, id in current_libraries.items(): for directory in get_directories_recursive(event.path): - print('Watching newly created child directory:', directory) + logger.info(f'Watching newly created child directory: {directory}') watch = inotify.add_watch(directory, Mask.MODIFY | Mask.CREATE | Mask.DELETE | Mask.CLOSE | Mask.MOVE) watching_libraries[path] = (id, watch) @@ -113,15 +112,14 @@ async def handle_inotify_events(): 'moved_from_path': event.path, 'moved_from_cookie': event.cookie} else: - print(f'Removing photo "{photo_path}" from library "{library_id}"') + logger.info(f'Removing photo "{photo_path}" from library "{library_id}"') await record_photo_async(photo_path, library_id, event.mask.name) elif event.mask.value == 1073741888: - print(f'Delete child directory with its all photos "{photo_path}" to library "{library_id}"') + 
logger.info(f'Deleting child directory and all of its photos "{photo_path}" from library "{library_id}"')
             await delete_child_dir_all_photos_async(photo_path, library_id)
         else:
-            if imghdr.what(photo_path) or not subprocess.run(['dcraw', '-i', photo_path]).returncode:
-                print(f'Adding photo "{photo_path}" to library "{library_id}"')
-                await record_photo_async(photo_path, library_id, event.mask.name)
+            logger.info(f'Adding photo "{photo_path}" to library "{library_id}"')
+            await record_photo_async(photo_path, library_id, event.mask.name)
 
     loop = asyncio.get_event_loop()
     loop.create_task(check_libraries())
@@ -130,7 +128,7 @@
     try:
         loop.run_forever()
     except KeyboardInterrupt:
-        print('Shutting down')
+        logger.info('Shutting down')
     finally:
         loop.run_until_complete(loop.shutdown_asyncgens())
         loop.close()
diff --git a/photonix/photos/models.py b/photonix/photos/models.py
index a66e68f9..ff141db3 100644
--- a/photonix/photos/models.py
+++ b/photonix/photos/models.py
@@ -7,6 +7,7 @@
 from django.utils import timezone
 
 from photonix.common.models import UUIDModel, VersionedModel
+from photonix.web.utils import logger
 
 User = get_user_model()
 
@@ -327,7 +328,10 @@ def complete(self, next_type=None, next_subject_id=None):
             self.parent.complete(
                 next_type=next_type, next_subject_id=next_subject_id)
 
-    def failed(self):
+    def failed(self, error=None, traceback=None):
         self.status = 'F'
         self.finished_at = timezone.now()
         self.save()
+
+        if error:
+            logger.error(error)
diff --git a/photonix/photos/utils/classification.py b/photonix/photos/utils/classification.py
index 850b3d69..d3a37842 100644
--- a/photonix/photos/utils/classification.py
+++ b/photonix/photos/utils/classification.py
@@ -5,8 +5,11 @@
 from django.db import transaction
 from django.utils import timezone
 
+
 from photonix.photos.models import Task, Photo
 from photonix.photos.utils.tasks import requeue_stuck_tasks
+from photonix.web.utils import logger
+
 
 CLASSIFIERS = [
     'color',
@@ -62,11 +65,12 @@ def __worker(self):
 
     def __process_task(self, task):
         try:
-            print(f'Running task: {task.type} - {task.subject_id}')
+            logger.info(f'Running task: {task.type} - {task.subject_id}')
             task.start()
             self.runner(task.subject_id)
             task.complete()
         except Exception:
+            logger.error(f'Error processing task: {task.type} - {task.subject_id}')
             traceback.print_exc()
             task.failed()
 
@@ -78,7 +82,7 @@ def __clean_up(self):
             t.join()
 
     def run(self, loop=True):
-        print('Starting {} {} workers\n'.format(self.num_workers, self.task_type))
+        logger.info('Starting {} {} workers'.format(self.num_workers, self.task_type))
 
         if self.num_workers > 1:
             for i in range(self.num_workers):
@@ -103,7 +107,7 @@ def run(self, loop=True):
             task_queryset = Task.objects.filter(type=self.task_type, status='P')
             for task in task_queryset[:8]:
                 if self.num_workers > 1:
-                    print('putting task')
+                    logger.debug('putting task')
                     self.queue.put(task)
                 else:
                     self.__process_task(task)
diff --git a/photonix/photos/utils/db.py b/photonix/photos/utils/db.py
index 0fe984c0..223685db 100644
--- a/photonix/photos/utils/db.py
+++ b/photonix/photos/utils/db.py
@@ -1,16 +1,38 @@
+from datetime import datetime
+from decimal import Decimal
+import imghdr
 import mimetypes
 import os
 import re
-from datetime import datetime
-from decimal import Decimal
+import subprocess
 
 from django.utils.timezone import utc
 
 from photonix.photos.models import Camera, Lens, Photo, PhotoFile, Task, Library, Tag, PhotoTag
-from photonix.photos.utils.metadata import (PhotoMetadata, parse_datetime, parse_gps_location)
+from 
photonix.photos.utils.metadata import PhotoMetadata, parse_datetime, parse_gps_location, get_mimetype
+from photonix.web.utils import logger
+
+
+MIMETYPE_WHITELIST = [
+    # This list is in addition to the filetypes detected by imghdr and 'dcraw -i'
+    'image/heif',
+    'image/heif-sequence',
+    'image/heic',
+    'image/heic-sequence',
+    'image/avif',
+    'image/avif-sequence',
+]
 
 
 def record_photo(path, library, inotify_event_type=None):
+    logger.info(f'Recording photo {path}')
+
+    mimetype = get_mimetype(path)
+
+    if not imghdr.what(path) and mimetype not in MIMETYPE_WHITELIST and subprocess.run(['dcraw', '-i', path]).returncode:
+        logger.error(f'File is not a supported type: {path} ({mimetype})')
+        return None
+
     if type(library) == Library:
         library_id = library.id
     else:
@@ -148,15 +170,6 @@ def record_photo(path, library, inotify_event_type=None):
             width = height
             height = old_width
 
-    mimetype = mimetypes.guess_type(path)[0]
-    # HEIF-based images don't get guessed by mimetypes lib so we use the extension if blank
-    if not mimetype:
-        ext = os.path.splitext(path)[1].lower()
-        if ext == '.heic':
-            mimetype = 'image/heic'
-        if ext == '.heics':
-            mimetype = 'image/heic-sequence'
-
     # Save PhotoFile
     photo_file.photo = photo
     photo_file.path = path
diff --git a/photonix/photos/utils/metadata.py b/photonix/photos/utils/metadata.py
index af3184c2..8fa14e5c 100644
--- a/photonix/photos/utils/metadata.py
+++ b/photonix/photos/utils/metadata.py
@@ -1,8 +1,9 @@
+from datetime import datetime
+from dateutil.parser import parse as parse_date
+import mimetypes
 import os
 import re
 from subprocess import Popen, PIPE
-from datetime import datetime
-from dateutil.parser import parse as parse_date
 
 from django.utils.timezone import utc
 
@@ -11,6 +12,7 @@ class PhotoMetadata(object):
     def __init__(self, path):
         self.data = {}
         try:
+            # exiftool produces data such as MIME Type for non-photos too
             result = Popen(['exiftool', path], stdout=PIPE, stdin=PIPE, stderr=PIPE).communicate()[0].decode('utf-8')
         except UnicodeDecodeError:
             result = ''
@@ -22,6 +24,10 @@ def __init__(self, path):
             except ValueError:
                 pass
 
+        # Some file MIME Types cannot be identified by exiftool, so we fall back to Python's mimetypes library so that the get_mimetype() function below works for any file
+        if not self.data.get('MIME Type'):
+            self.data['MIME Type'] = mimetypes.guess_type(path)[0]
+
     def get(self, attribute, default=None):
         return self.data.get(attribute, default)
 
@@ -89,9 +95,8 @@ def get_dimensions(path):
         return (int(metadata.data['Image Width']), int(metadata.data['Image Height']))
     return (None, None)
 
+
 def get_mimetype(path):
-    # Done
-    """Pulls the MIME Type from the given path"""
     metadata = PhotoMetadata(path)
     if metadata.data.get('MIME Type'):
         return metadata.data.get('MIME Type')
diff --git a/photonix/photos/utils/organise.py b/photonix/photos/utils/organise.py
index 74a6b1cd..175f3447 100644
--- a/photonix/photos/utils/organise.py
+++ b/photonix/photos/utils/organise.py
@@ -11,6 +11,7 @@
     find_new_file_name, mkdir_p)
 from photonix.photos.utils.metadata import get_datetime
 
+
 SYNOLOGY_THUMBNAILS_DIR_NAME = '/@eaDir'
 
 
diff --git a/photonix/photos/utils/raw.py b/photonix/photos/utils/raw.py
index efa38e97..2ceeac2b 100644
--- a/photonix/photos/utils/raw.py
+++ b/photonix/photos/utils/raw.py
@@ -9,6 +9,7 @@
 from django.conf import settings
 
 from photonix.photos.models import Photo, PhotoFile, Task
+from photonix.web.utils import logger
 
 from .metadata import get_dimensions, get_mimetype
 
@@ -60,7 +61,7 @@ def process_raw_task(photo_file_id, task):
     output_path, 
version, process_params, external_version = generate_jpeg(photo_file.path) if not output_path: - task.failed() + task.failed('Could not generate JPEG') return if not os.path.isdir(settings.PHOTO_RAW_PROCESSED_DIR): @@ -103,22 +104,29 @@ def __get_exiftool_image(temp_dir, basename): return exiftool_files def __has_acceptable_dimensions(original_image_path, new_image_path, accept_empty_original_dimensions=False): + logger.debug('Checking image dimensions') original_image_dimensions = get_dimensions(original_image_path) + logger.debug(f'Original image dimensions: {original_image_dimensions}') new_image_dimensions = get_dimensions(new_image_path) + logger.debug(f'New image dimensions: {new_image_dimensions}') # We don't know the original dimensions so have nothing to compare to if original_image_dimensions == (None, None): if accept_empty_original_dimensions: + logger.debug('No original dimensions, accepting new dimensions') return True else: + logger.debug('No original dimensions, rejecting new dimensions') return False # Embedded image can't be the full resolution - if not new_image_dimensions[0] or new_image_dimensions[1] or new_image_dimensions[0] < 512 or new_image_dimensions[1] < 512: + if not new_image_dimensions[0] or not new_image_dimensions[1] or new_image_dimensions[0] < 512 or new_image_dimensions[1] < 512: + logger.debug('Dimensions are too small') return False # Embedded image is exactly the same dimensions if original_image_dimensions == new_image_dimensions: + logger.debug('Dimensions match exactly') return True # Embedded image within 95% of the raw width and height @@ -126,8 +134,10 @@ def __has_acceptable_dimensions(original_image_path, new_image_path, accept_empt and original_image_dimensions[1] / new_image_dimensions[1] > 0.95 \ and new_image_dimensions[0] / original_image_dimensions[0] > 0.95 \ and new_image_dimensions[1] / original_image_dimensions[1] > 0.95: + logger.debug('Dimensions match closely enough') return True + logger.debug('Dimensions are not good') return False @@ -171,8 +181,15 @@ def __exiftool_version(): except AttributeError: return +def __delete_file_silently(path): + try: + os.remove(path) + except FileNotFoundError: + pass + def generate_jpeg(path): + logger.debug(f'Generating JPEG for raw file {path}') basename = os.path.basename(path) temp_dir = tempfile.mkdtemp() temp_input_path = Path(temp_dir) / basename @@ -185,6 +202,7 @@ def generate_jpeg(path): # Handle Canon's CR3 format since their thumbnails are proprietary. 
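The `__has_acceptable_dimensions()` tolerance logic patched above is easier to verify with concrete numbers. Below is a minimal standalone sketch of the same rule; the `close_enough()` helper name and the sample dimensions are illustrative only, not part of the patch:

    def close_enough(original, candidate, tolerance=0.95):
        """Accept candidate dimensions only if both axes are within ~5% of the original."""
        ow, oh = original
        cw, ch = candidate
        if not cw or not ch or cw < 512 or ch < 512:
            return False  # missing or preview-sized embeds can never be the full image
        # Both directions are tested so neither axis may be more than ~5% larger or smaller
        return (ow / cw > tolerance and oh / ch > tolerance
                and cw / ow > tolerance and ch / oh > tolerance)

    assert close_enough((6000, 4000), (5984, 3989))      # full-size embedded JPEG: accept
    assert not close_enough((6000, 4000), (1616, 1080))  # thumbnail-sized: fall through to dcraw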
mimetype = get_mimetype(temp_input_path)
     if mimetype == 'image/x-canon-cr3':
+        logger.debug('File type detected as Canon Raw v3')
         subprocess.Popen([
             'exiftool', '-b', '-JpgFromRaw', '-w', 'jpg', '-ext', 'CR3', temp_input_path,
             '-execute', '-tagsfromfile', temp_input_path,
@@ -204,11 +222,13 @@ def generate_jpeg(path):
         process_params = 'exiftool -b -JpgFromRaw'
         external_version = __exiftool_version()
     elif mimetype in ['image/heif', 'image/heic']:
+        logger.debug('File type detected as HEIF/HEIC')
         temp_output_path = Path(temp_dir) / 'out.jpg'
         subprocess.run(['heif-convert', '-q', '90', temp_input_path, temp_output_path])
         process_params = 'heif-convert -q 90'
         external_version = __heif_convert_version()
     else:
+        logger.debug('Attempting to extract JPEG using dcraw')
         # Try to extract the JPEG that might be inside the raw file
         subprocess.run(['dcraw', '-e', temp_input_path])
         temp_output_path = __get_generated_image(temp_dir, basename)
@@ -218,54 +238,66 @@ def generate_jpeg(path):
     # Check the JPEG's dimensions are close enough to the raw's dimensions
     if temp_output_path:
         if __has_acceptable_dimensions(temp_input_path, temp_output_path):
+            logger.debug('JPEG file looks good so far')
             valid_image = True
         else:
-            os.remove(temp_output_path)
+            __delete_file_silently(temp_output_path)
 
     # Next try to use embedded profile to generate an image
     if not valid_image:
+        logger.debug('Attempting to generate JPEG with dcraw using embedded color profile')
         subprocess.run(['dcraw', '-p embed', temp_input_path])
         temp_output_path = __get_generated_image(temp_dir, basename)
         if temp_output_path:
             if __has_acceptable_dimensions(temp_input_path, temp_output_path):
+                logger.debug('JPEG file looks good so far')
                 valid_image = True
                 process_params = 'dcraw -p embed'
             else:
-                os.remove(temp_output_path)
+                __delete_file_silently(temp_output_path)
 
     # Finally try to use the embedded whitebalance to generate an image
     if not valid_image:
+        logger.debug('Attempting to generate JPEG with dcraw using embedded white balance')
         subprocess.run(['dcraw', '-w', temp_input_path])
         temp_output_path = __get_generated_image(temp_dir, basename)
         if temp_output_path:
             if __has_acceptable_dimensions(temp_input_path, temp_output_path, True):
+                logger.debug('JPEG file looks good so far')
                 valid_image = True
                 process_params = 'dcraw -w'
             else:
-                os.remove(temp_output_path)
+                __delete_file_silently(temp_output_path)
 
     # If extracted image isn't a JPEG then we need to convert it
     if valid_image:
         valid_image = identified_as_jpeg(temp_output_path)
 
         if not valid_image:
+            logger.debug('JPEG didn\'t pass test, attempting bitmap conversion')
             jpeg_path = tempfile.mktemp()
             bitmap_to_jpeg(temp_output_path, jpeg_path)
 
             if identified_as_jpeg(jpeg_path):
+                logger.debug('JPEG file now passes test')
                 temp_output_path = jpeg_path
                 valid_image = True
 
     # Move the outputted file to a new temporary location
     if valid_image:
+        logger.debug('I\'m happy with the JPEG so moving it to a new location')
         final_path = tempfile.mktemp()
         os.rename(temp_output_path, final_path)
 
     # Delete the temporary working directory
+    logger.debug('Deleting temporary files')
     shutil.rmtree(temp_dir)
 
     if valid_image:
+        logger.debug(f'Returning info about JPEG which is temporarily located here: {final_path}')
         return (final_path, RAW_PROCESS_VERSION, process_params, external_version)
+
+    logger.error('Couldn\'t make JPEG from raw file')
     return (None, RAW_PROCESS_VERSION, None, None)
diff --git a/photonix/photos/utils/thumbnails.py b/photonix/photos/utils/thumbnails.py
index c897e659..acd16b5d 100644
--- 
a/photonix/photos/utils/thumbnails.py
+++ b/photonix/photos/utils/thumbnails.py
@@ -28,7 +28,7 @@ def generate_thumbnails_for_photo(photo, task):
         try:
             photo = Photo.objects.get(id=photo)
         except Photo.DoesNotExist:
-            task.failed()
+            task.failed(f'Photo instance does not exist with id={photo}')
             return
 
     for thumbnail in settings.THUMBNAIL_SIZES:
diff --git a/photonix/web/settings.py b/photonix/web/settings.py
index d581bb45..45ba9241 100644
--- a/photonix/web/settings.py
+++ b/photonix/web/settings.py
@@ -92,6 +92,46 @@
     }
 }
 
+LOGGING = {
+    'version': 1,
+    'disable_existing_loggers': False,
+    'formatters': {
+        'color': {
+            '()': 'colorlog.ColoredFormatter',
+            'format': '%(log_color)s%(asctime)s %(levelname)-8s %(message)s',
+            'log_colors': {
+                'DEBUG': 'cyan',
+                'INFO': 'green',
+                'WARNING': 'yellow',
+                'ERROR': 'red',
+                'CRITICAL': 'white,bg_red',
+            },
+        }
+    },
+    'handlers': {
+        'console': {
+            'class': 'logging.StreamHandler',
+            'formatter': 'color',
+        },
+    },
+    'root': {
+        'handlers': ['console'],
+        'level': 'WARNING',
+    },
+    'loggers': {
+        'django': {
+            'handlers': ['console'],
+            'level': os.getenv('DJANGO_LOG_LEVEL', 'WARNING'),
+            'propagate': False,
+        },
+        'photonix': {
+            'handlers': ['console'],
+            'level': os.getenv('LOG_LEVEL', 'INFO'),
+            'propagate': False,
+        },
+    },
+}
+
 AUTHENTICATION_BACKENDS = [
     'graphql_jwt.backends.JSONWebTokenBackend',
     'django.contrib.auth.backends.ModelBackend',
diff --git a/photonix/web/utils.py b/photonix/web/utils.py
index 5d33cbea..08ecc39c 100644
--- a/photonix/web/utils.py
+++ b/photonix/web/utils.py
@@ -1,3 +1,4 @@
+import logging
 import os
 
 from django.core.management import utils
@@ -6,6 +7,9 @@
 from photonix.photos.utils.redis import redis_connection
 
 
+logger = logging.getLogger('photonix')
+
+
 def get_secret_key():
     # To avoid each installation having the same Django SECRET_KEY we generate
     # a random one and store it in Redis. 
We have to store it somewhere diff --git a/requirements.txt b/requirements.txt index ca29e2a6..ac8d6857 100644 --- a/requirements.txt +++ b/requirements.txt @@ -34,3 +34,4 @@ pytest-django==3.8.0 mock==3.0.5 factory-boy==2.12.0 coverage==5.0.3 +colorlog==5.0.1 diff --git a/system/nginx_dev.conf b/system/nginx_dev.conf index 595212cc..eaebb9e9 100644 --- a/system/nginx_dev.conf +++ b/system/nginx_dev.conf @@ -14,12 +14,7 @@ events { http { include /etc/nginx/mime.types; default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /dev/stdout; + access_log off; sendfile on; tcp_nopush on; @@ -40,19 +35,16 @@ http { location ~ ^/(favicon.png|manifest.json|logo.svg) { root /srv/ui/public; - access_log off; expires 1d; } location /photos { root /data; - access_log off; expires 1d; } location /thumbnails { root /data/cache; - access_log off; expires 1d; } diff --git a/system/nginx_prd.conf b/system/nginx_prd.conf index 93d94733..d671d81f 100644 --- a/system/nginx_prd.conf +++ b/system/nginx_prd.conf @@ -14,12 +14,7 @@ events { http { include /etc/nginx/mime.types; default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /dev/stdout; + access_log off; sendfile on; tcp_nopush on; @@ -40,19 +35,16 @@ http { location /photos { root /data; - access_log off; expires 1d; } location /thumbnails { root /data/cache; - access_log off; expires 1d; } location /static-collected { root /srv; - access_log off; expires 1d; } @@ -71,7 +63,6 @@ http { location / { root /srv/ui/build; - access_log off; expires 1d; ssi on; try_files $uri /index.html =404; From 116823d5c6c192311b0f40d07d2b9667faa26a01 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Sun, 11 Jul 2021 22:51:51 +0100 Subject: [PATCH 077/110] Attempt at suppressing tensorflow warnings, tweaking number of raw processor workers --- docker/Dockerfile.prd | 1 + photonix/manage.py | 1 + photonix/photos/management/commands/raw_processor.py | 2 +- 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/docker/Dockerfile.prd b/docker/Dockerfile.prd index 33686966..7790faed 100644 --- a/docker/Dockerfile.prd +++ b/docker/Dockerfile.prd @@ -141,6 +141,7 @@ COPY system/cron.d /etc/cron.d/ RUN chmod 0644 /etc/cron.d/* ENV PYTHONPATH /srv +ENV TF_CPP_MIN_LOG_LEVEL 3 RUN DJANGO_SECRET_KEY=test python photonix/manage.py collectstatic --noinput --link diff --git a/photonix/manage.py b/photonix/manage.py index 53daf0a7..68501c16 100755 --- a/photonix/manage.py +++ b/photonix/manage.py @@ -2,6 +2,7 @@ import os import sys + if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "photonix.web.settings") try: diff --git a/photonix/photos/management/commands/raw_processor.py b/photonix/photos/management/commands/raw_processor.py index 15e28275..16ff4f13 100644 --- a/photonix/photos/management/commands/raw_processor.py +++ b/photonix/photos/management/commands/raw_processor.py @@ -30,7 +30,7 @@ class Command(BaseCommand): help = 'Processes raw photos into a JPEG we can use elsewhere.' 
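All of the processor commands in this series share the queue-plus-worker-threads shape that the hunk below tunes. A minimal sketch of that pattern, assuming a hypothetical `process()` stand-in for `process_raw_task()` and a toy task batch:

    import queue
    import threading
    from multiprocessing import cpu_count

    q = queue.Queue()

    def process(task):
        print(f'processing {task}')  # stand-in for process_raw_task(task.subject_id, task)

    def worker():
        # Block until a task arrives, process it, then mark it done so that
        # q.join() can tell when the whole batch has been drained.
        while True:
            task = q.get()
            try:
                process(task)
            finally:
                q.task_done()

    num_workers = max(int(cpu_count() / 4), 1)  # same heuristic as the change below
    for _ in range(num_workers):
        threading.Thread(target=worker, daemon=True).start()

    for task in ['a.cr2', 'b.nef', 'c.dng']:  # toy batch; the command reads pending Task rows instead
        q.put(task)
    q.join()  # wait for every queued task before scheduling the next batch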
def run_processors(self):
-        num_workers = cpu_count()
+        num_workers = max(int(cpu_count() / 4), 1)
         threads = []
 
         logger.info(f'Starting {num_workers} raw processor workers')
 

From eb1f1fcede68f5127f66251bb53e008ec7a174b4 Mon Sep 17 00:00:00 2001
From: GyanP
Date: Mon, 12 Jul 2021 23:23:23 +0530
Subject: [PATCH 078/110] Applied custom Case in person filter to sort person list and keep Unknown Person tags always at the end.

---
 photonix/photos/schema.py | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/photonix/photos/schema.py b/photonix/photos/schema.py
index 21adec45..c7f4561b 100644
--- a/photonix/photos/schema.py
+++ b/photonix/photos/schema.py
@@ -8,6 +8,7 @@
 from graphene_django.types import DjangoObjectType
 from graphql_jwt.decorators import login_required
 from graphql import GraphQLError
+from django.db.models import Case, When, Value, IntegerField
 import graphene
 
 from .models import Library, Camera, Lens, Photo, Tag, PhotoTag, LibraryPath, LibraryUser, PhotoFile, Task
@@ -354,8 +355,26 @@ def resolve_all_person_tags(self, info, **kwargs):
             photos_list = filter_photos_queryset(
                 filters, Photo.objects.filter(library__users__user=user),
                 kwargs.get('library_id'))
-            return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__photo__in=photos_list).order_by(Lower('name')).distinct()
-        return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__deleted=False).order_by(Lower('name')).distinct()
+            # return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__photo__in=photos_list).order_by(Lower('name')).distinct()
+            return Tag.objects.filter(
+                library__users__user=user,
+                library__id=kwargs.get('library_id'),
+                type='F',
+                photo_tags__photo__in=photos_list
+            ).annotate(unknown_tag=Case(
+                When(name__startswith='Unknown', then=Value(1)),
+                default=Value(2),
+                output_field=IntegerField(),)).order_by("-unknown_tag",Lower('name')).distinct()
+            # return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__deleted=False).order_by(Lower('name')).distinct()
+        return Tag.objects.filter(
+            library__users__user=user,
+            library__id=kwargs.get('library_id'),
+            type='F',
+            photo_tags__deleted=False
+        ).annotate(unknown_tag=Case(
+            When(name__startswith='Unknown', then=Value(1)),
+            default=Value(2),
+            output_field=IntegerField(),)).order_by("-unknown_tag",Lower('name')).distinct()
 
     def resolve_all_color_tags(self, info, **kwargs):
         user = info.context.user

From 2a0fdfa9a04be73c4c207bc3acc5fe464a4e28f1 Mon Sep 17 00:00:00 2001
From: Damian Moore
Date: Mon, 12 Jul 2021 22:27:16 +0100
Subject: [PATCH 079/110] Formatting and comments

---
 photonix/photos/schema.py | 22 ++++++++++++++--------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/photonix/photos/schema.py b/photonix/photos/schema.py
index c7f4561b..5fb1e8ed 100644
--- a/photonix/photos/schema.py
+++ b/photonix/photos/schema.py
@@ -355,26 +355,32 @@ def resolve_all_person_tags(self, info, **kwargs):
             photos_list = filter_photos_queryset(
                 filters, Photo.objects.filter(library__users__user=user),
                 kwargs.get('library_id'))
-            # return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__photo__in=photos_list).order_by(Lower('name')).distinct()
+            # Sort Person tags but keep "Unknown..." 
ones at the end return Tag.objects.filter( library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__photo__in=photos_list - ).annotate(unknown_tag=Case( + ).annotate( + unknown_tag=Case( When(name__startswith='Unknown', then=Value(1)), default=Value(2), - output_field=IntegerField(),)).order_by("-unknown_tag",Lower('name')).distinct() - # return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__deleted=False).order_by(Lower('name')).distinct() + output_field=IntegerField(), + ) + ).order_by("-unknown_tag", Lower('name')).distinct() + # Sort Person tags but keep "Unknown..." ones at the end return Tag.objects.filter( library__users__user=user, library__id=kwargs.get('library_id'), type='F', photo_tags__deleted=False - ).annotate(unknown_tag=Case( - When(name__startswith='Unknown', then=Value(1)), - default=Value(2), - output_field=IntegerField(),)).order_by("-unknown_tag",Lower('name')).distinct() + ).annotate( + unknown_tag=Case( + When(name__startswith='Unknown', then=Value(1)), + default=Value(2), + output_field=IntegerField(), + ) + ).order_by("-unknown_tag", Lower('name')).distinct() def resolve_all_color_tags(self, info, **kwargs): user = info.context.user From d3d64033d34824cd5521933725f455c7b065c3e8 Mon Sep 17 00:00:00 2001 From: GyanP Date: Tue, 13 Jul 2021 22:11:17 +0530 Subject: [PATCH 080/110] task completed --- ui/src/components/PhotoDetail.js | 27 ++++++++++++++++----------- ui/src/components/ZoomableImage.js | 24 +++++++++++++++++------- 2 files changed, 33 insertions(+), 18 deletions(-) diff --git a/ui/src/components/PhotoDetail.js b/ui/src/components/PhotoDetail.js index 1c4ed4ff..c72c64c2 100644 --- a/ui/src/components/PhotoDetail.js +++ b/ui/src/components/PhotoDetail.js @@ -102,7 +102,7 @@ const Container = styled('div')` const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { const safeArea = useSelector(getSafeArea) - const [showFaceIcons, setShowFaceIcons] = useState(true) + // const [showFaceIcons, setShowFaceIcons] = useState(true) const [showBoundingBox, setShowBoundingBox] = useLocalStorageState( 'showObjectBoxes', true @@ -113,7 +113,7 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { getPrevNextPhotos(state, photoId) ) const [numHistoryPushes, setNumHistoryPushes] = useState(0) - + const [showTopIcons, setShowTopIcons] = useState(true) // TODO: Bring this back so it doesn't get triggered by someone adding a tag with 'i' in it // useEffect(() => { // const handleKeyDown = (event) => { @@ -203,15 +203,20 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { -
{ } }} /> -
+
}
{ /> )} - {!showMetadata ? ( + {showTopIcons && (!showMetadata ? ( { style={{ marginTop: safeArea.top }} // title="Press [I] key to show/hide photo details" /> - )} - {photo?.downloadUrl && ( + ))} + {showTopIcons && (photo?.downloadUrl && ( { style={{ marginTop: safeArea.top, padding: 3 }} /> - )} + ))} ) } diff --git a/ui/src/components/ZoomableImage.js b/ui/src/components/ZoomableImage.js index 5a4dcd5a..866ace99 100644 --- a/ui/src/components/ZoomableImage.js +++ b/ui/src/components/ZoomableImage.js @@ -85,8 +85,13 @@ const ZoomableImage = ({ prev, refetch, showBoundingBox, - showFaceIcons, - setShowFaceIcons, + // showFaceIcons, + // setShowFaceIcons, + setShowBoundingBox, + setShowMetadata, + showMetadata, + showTopIcons, + setShowTopIcons, }) => { const [scale, setScale] = useState(1) const [zoom, setZoom] = useState(false) @@ -153,13 +158,18 @@ const ZoomableImage = ({ // To handle icon show hide on single click. const showHideIcons = (event) => { - if(clickTimeOut !== null){ + if (clickTimeOut !== null) { clearTimeout(clickTimeOut) - }else{ - clickTimeOut = setTimeout(()=>{ - setShowFaceIcons(!showFaceIcons) + } else { + clickTimeOut = setTimeout(() => { + if (showMetadata) { + setShowMetadata(!showMetadata) + } else { + setShowBoundingBox(!showBoundingBox) + setShowTopIcons(!showTopIcons) + } clearTimeout(clickTimeOut) - clickTimeOut=null + clickTimeOut = null },300) } } From cecf4777a97107e76f73739dcfe264b27c1e7bee Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Tue, 13 Jul 2021 19:12:13 +0100 Subject: [PATCH 081/110] Tipy-ups and a bug fix --- ui/src/components/PhotoDetail.js | 94 +++++++++++++++--------------- ui/src/components/ZoomableImage.js | 14 ++--- 2 files changed, 53 insertions(+), 55 deletions(-) diff --git a/ui/src/components/PhotoDetail.js b/ui/src/components/PhotoDetail.js index c72c64c2..7e0765ba 100644 --- a/ui/src/components/PhotoDetail.js +++ b/ui/src/components/PhotoDetail.js @@ -102,7 +102,6 @@ const Container = styled('div')` const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { const safeArea = useSelector(getSafeArea) - // const [showFaceIcons, setShowFaceIcons] = useState(true) const [showBoundingBox, setShowBoundingBox] = useLocalStorageState( 'showObjectBoxes', true @@ -170,7 +169,7 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { }, [photoId, prevNextPhotos, prevPhoto, nextPhoto]) const setBoxColorClass = (tag) => { - return tag.deleted ? 'whiteBox' : tag.verified ? 'greenBox' : 'yellowBox'; + return tag.deleted ? 'whiteBox' : tag.verified ? 'greenBox' : 'yellowBox' } let boxes = { @@ -193,8 +192,8 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { sizeY: tag.sizeY, verified: tag.verified, deleted: tag.deleted, - boxColorClass: setBoxColorClass(tag), - showVerifyIcon: tag.showVerifyIcon, + boxColorClass: setBoxColorClass(tag), + showVerifyIcon: tag.showVerifyIcon, } }), } @@ -205,8 +204,6 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { photoId={photoId} boxes={showBoundingBox && boxes} showBoundingBox={showBoundingBox} - // showFaceIcons={showFaceIcons} - // setShowFaceIcons={setShowFaceIcons} setShowBoundingBox={setShowBoundingBox} showMetadata={showMetadata} setShowMetadata={setShowMetadata} @@ -216,26 +213,28 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { prev={prevPhoto} refetch={refetch} /> - {showTopIcons &&
- { - if ( - history.length - numHistoryPushes > 2 || - document.referrer !== '' - ) { - history.goBack() - // history.go(-(numHistoryPushes + 1)) - } else { - history.push('/') - } - }} - /> -
} + {showTopIcons && ( +
+ { + if ( + history.length - numHistoryPushes > 2 || + document.referrer !== '' + ) { + history.goBack() + // history.go(-(numHistoryPushes + 1)) + } else { + history.push('/') + } + }} + /> +
+ )}
{ /> )} - {showTopIcons && (!showMetadata ? ( - setShowMetadata(!showMetadata)} - style={{ marginTop: safeArea.top }} - // title="Press [I] key to show/hide photo details" - /> - ) : ( - setShowMetadata(!showMetadata)} - style={{ marginTop: safeArea.top }} - // title="Press [I] key to show/hide photo details" - /> - ))} - {showTopIcons && (photo?.downloadUrl && ( + {showTopIcons && + (!showMetadata ? ( + setShowMetadata(!showMetadata)} + style={{ marginTop: safeArea.top }} + // title="Press [I] key to show/hide photo details" + /> + ) : ( + setShowMetadata(!showMetadata)} + style={{ marginTop: safeArea.top }} + // title="Press [I] key to show/hide photo details" + /> + ))} + {showTopIcons && photo?.downloadUrl && ( { style={{ marginTop: safeArea.top, padding: 3 }} /> - ))} + )} ) } diff --git a/ui/src/components/ZoomableImage.js b/ui/src/components/ZoomableImage.js index 866ace99..bafd4fc2 100644 --- a/ui/src/components/ZoomableImage.js +++ b/ui/src/components/ZoomableImage.js @@ -85,8 +85,6 @@ const ZoomableImage = ({ prev, refetch, showBoundingBox, - // showFaceIcons, - // setShowFaceIcons, setShowBoundingBox, setShowMetadata, showMetadata, @@ -97,7 +95,7 @@ const ZoomableImage = ({ const [zoom, setZoom] = useState(false) const [loading, setLoading] = useState(true) const [displayImage, setDisplayImage] = useState(false) - let clickTimeOut = null; + let clickTimeOut = null const prevNextPhotos = useSelector((state) => getPrevNextPhotos(state, photoId) @@ -161,17 +159,16 @@ const ZoomableImage = ({ if (clickTimeOut !== null) { clearTimeout(clickTimeOut) } else { - clickTimeOut = setTimeout(() => { + clickTimeOut = setTimeout(() => { if (showMetadata) { setShowMetadata(!showMetadata) } else { - setShowBoundingBox(!showBoundingBox) setShowTopIcons(!showTopIcons) } clearTimeout(clickTimeOut) clickTimeOut = null - },300) - } + }, 300) + } } return ( @@ -202,13 +199,14 @@ const ZoomableImage = ({ className={displayImage ? 
'display' : undefined} /> {boxes && + showTopIcons && Object.keys(boxes).map((key, index) => ( Date: Tue, 13 Jul 2021 22:24:32 +0100 Subject: [PATCH 082/110] Pins exact version of opencv-python to fix CI build for ARMv7 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index ac8d6857..0fb06b84 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,7 +2,7 @@ numpy==1.19.2 scipy==1.4.1 matplotlib==3.1.2 tensorflow==2.4.1 -opencv-python>=3.4.4 +opencv-python==4.5.1.48 annoy==1.17.0 Django==3.2.3 From 618eeb79ba477c6b6eeafcc94189ff9d28189c09 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Wed, 14 Jul 2021 22:23:25 +0100 Subject: [PATCH 083/110] Fix for face recognition on ARM --- docker/Dockerfile.prd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docker/Dockerfile.prd b/docker/Dockerfile.prd index 7790faed..053f37af 100644 --- a/docker/Dockerfile.prd +++ b/docker/Dockerfile.prd @@ -14,7 +14,6 @@ RUN apt-get update && \ libblas3 \ libfreetype6 \ libfreetype6-dev \ - libhdf5-dev \ libjpeg-dev \ liblapack-dev \ liblapack3 \ @@ -104,6 +103,7 @@ RUN apt-get update && \ libfreetype6-dev \ libgl1 \ libglib2.0-dev \ + libhdf5-dev \ libheif-examples \ libimage-exiftool-perl \ libpq-dev \ From 8b7b57608c0dcb776c302cbb79609ce07dda0d4e Mon Sep 17 00:00:00 2001 From: Gyan P Date: Fri, 9 Apr 2021 22:35:14 +0530 Subject: [PATCH 084/110] query implementaion for task 192 in backend --- photonix/photos/schema.py | 32 ++++++++++++++++++++++++++++---- photonix/photos/utils/tasks.py | 11 ++++++++--- 2 files changed, 36 insertions(+), 7 deletions(-) diff --git a/photonix/photos/schema.py b/photonix/photos/schema.py index 5fb1e8ed..81121f7b 100644 --- a/photonix/photos/schema.py +++ b/photonix/photos/schema.py @@ -2,19 +2,21 @@ from django.conf import settings from django.contrib.auth import get_user_model, load_backend, login -import django_filters from django_filters import CharFilter from graphene_django.filter import DjangoFilterConnectionField from graphene_django.types import DjangoObjectType from graphql_jwt.decorators import login_required from graphql import GraphQLError -from django.db.models import Case, When, Value, IntegerField -import graphene - +from django.db.models import Case, When, Value, IntegerField, Q +from django.contrib.auth import load_backend, login +from photonix.photos.utils.tasks import count_remaining_task from .models import Library, Camera, Lens, Photo, Tag, PhotoTag, LibraryPath, LibraryUser, PhotoFile, Task from photonix.photos.utils.filter_photos import filter_photos_queryset, sort_photos_exposure from photonix.photos.utils.metadata import PhotoMetadata from django.db.models.functions import Lower +import django_filters +import graphene +import os User = get_user_model() @@ -208,6 +210,17 @@ class PhotoMetadataFields(graphene.ObjectType): ok = graphene.Boolean() +class TaskType(graphene.ObjectType): + """Different type of tasks.""" + + generate_thumbnails = graphene.types.generic.GenericScalar() + process_raw = graphene.types.generic.GenericScalar() + classify_color = graphene.types.generic.GenericScalar() + classify_location = graphene.types.generic.GenericScalar() + classify_object = graphene.types.generic.GenericScalar() + classify_style = graphene.types.generic.GenericScalar() + + class Query(graphene.ObjectType): all_libraries = graphene.List(LibraryType) camera = graphene.Field(CameraType, id=graphene.UUID(), make=graphene.String(), model=graphene.String()) @@ -237,6 +250,7 @@ 
class Query(graphene.ObjectType):
     all_generic_tags = graphene.List(LocationTagType, library_id=graphene.UUID(), multi_filter=graphene.String())
     library_setting = graphene.Field(LibrarySetting, library_id=graphene.UUID())
     photo_file_metadata = graphene.Field(PhotoMetadataFields, photo_file_id=graphene.UUID())
+    task_progress = graphene.Field(TaskType)
 
     def resolve_all_libraries(self, info, **kwargs):
         user = info.context.user
@@ -452,6 +466,16 @@ def resolve_photo_file_metadata(self, info, **kwargs):
             }
         return {'ok': False}
 
+    def resolve_task_progress(self, info, **kwargs):
+        """Return the number of remaining and total tasks for each task type."""
+        return {
+            "generate_thumbnails": count_remaining_task('generate_thumbnails'),
+            "process_raw": count_remaining_task('process_raw'),
+            "classify_color": count_remaining_task('classify.color'),
+            "classify_location": count_remaining_task('classify.location'),
+            "classify_object": count_remaining_task('classify.object'),
+            "classify_style": count_remaining_task('classify.style')}
+
 
 class LibraryInput(graphene.InputObjectType):
     """LibraryInput to take input of library fields from frontend."""
diff --git a/photonix/photos/utils/tasks.py b/photonix/photos/utils/tasks.py
index 66b3f05a..11489fe6 100644
--- a/photonix/photos/utils/tasks.py
+++ b/photonix/photos/utils/tasks.py
@@ -1,11 +1,10 @@
 from datetime import timedelta
 
 from django.utils import timezone
-
+from django.db.models import Q
 from photonix.photos.models import Task
 
-
-def requeue_stuck_tasks(task_type, age_hours=0.01, max_num=8):
+def requeue_stuck_tasks(task_type, age_hours=24, max_num=8):
     # Set old, failed jobs to Pending
     for task in Task.objects.filter(type=task_type, status='S', updated_at__lt=timezone.now() - timedelta(hours=24))[:max_num]:
         task.status = 'P'
@@ -13,3 +12,9 @@ def requeue_stuck_tasks(task_type, age_hours=0.01, max_num=8):
     for task in Task.objects.filter(type=task_type, status='F', updated_at__lt=timezone.now() - timedelta(hours=24))[:max_num]:
         task.status = 'P'
         task.save()
+
+def count_remaining_task(task_type):
+    """Return the total and remaining task counts for the given task type."""
+    return {
+        'total': Task.objects.filter(type=task_type).count(),
+        'remaining': Task.objects.filter(Q(type=task_type), Q(status='P') | Q(status='S')).count()}

From 2d5dbe2cf23e90ab8f25750e89d927bddf32dc06 Mon Sep 17 00:00:00 2001
From: Gyan P
Date: Wed, 14 Apr 2021 20:06:14 +0530
Subject: [PATCH 085/110] frontend implementation of progress notification

---
 ui/src/components/Header.js       |   9 +-
 ui/src/components/Notification.js | 279 ++++++++++++++++++++++++++++++
 ui/src/components/Settings.js     |   6 +-
 ui/src/components/User.js         |   2 +-
 ui/src/graphql/settings.js        |  12 ++
 ui/src/static/images/pause.svg    |   1 +
 ui/src/static/images/play.svg     |   1 +
 7 files changed, 299 insertions(+), 11 deletions(-)
 create mode 100644 ui/src/components/Notification.js
 create mode 100644 ui/src/static/images/pause.svg
 create mode 100644 ui/src/static/images/play.svg

diff --git a/ui/src/components/Header.js b/ui/src/components/Header.js
index 22990226..eadb6225 100644
--- a/ui/src/components/Header.js
+++ b/ui/src/components/Header.js
@@ -3,10 +3,10 @@
 import { useSelector } from 'react-redux'
 import styled from '@emotion/styled'
 
 import User from './User'
+import Notification from './Notification'
 import { getIsMobileApp, getSafeArea } from '../stores/layout/selector'
 import logo from '../static/images/logo.svg'
 import menuIcon from '../static/images/menu.svg'
-// import notifications from '../static/images/notifications.svg'
 
 const Container = 
styled('div')` height: 50px; @@ -45,9 +45,6 @@ const Container = styled('div')` .navigation { flex-grow: 1; } - .notifications { - width: 50px; - } ` const Header = ({ profile, libraries }) => { @@ -71,9 +68,7 @@ const Header = ({ profile, libraries }) => { Photonix
- {/*
- Notifications -
*/} + ) diff --git a/ui/src/components/Notification.js b/ui/src/components/Notification.js new file mode 100644 index 00000000..49cf0755 --- /dev/null +++ b/ui/src/components/Notification.js @@ -0,0 +1,279 @@ +import React, { useRef, useState, useEffect } from 'react' +import styled from '@emotion/styled' +import { Progress, Box, Flex } from "@chakra-ui/core" +import { useQuery, useMutation } from '@apollo/react-hooks' +import { useSelector } from 'react-redux' + +import notifications from '../static/images/notifications.svg' +import play from '../static/images/play.svg' +import pause from '../static/images/pause.svg' +import { GET_TASK_PROGRESS } from '../graphql/settings' +import { getActiveLibrary } from '../stores/libraries/selector' +import { useComponentVisible } from './User' +import { useSettings } from './Settings' +import { + SETTINGS_STYLE, + SETTINGS_COLOR, + SETTINGS_LOCATION, + SETTINGS_OBJECT, + GET_SETTINGS +} from '../graphql/settings' + +const Container = styled('div')` + margin-right:10px; + > img { + filter: invert(0.9); + padding: 10px 0 10px 10px; + width: 50px; + height: 50px; + cursor: pointer; + } + .userMenu { + position: absolute; + width: 290px; + right: 0px; + top: 50px; + z-index: 10; + background: #484848; + margin: 0; + list-style: none; + padding: 0; + box-shadow: -3px 8px 17px rgba(0, 0, 0, 0.15); + } + .isMobileApp header .userMenu { + top: 80px; + } + .userMenu li { + padding: 12px 15px 12px 15px; + cursor: default; + // display: flex; + margin-bottom: 20px; + } + .userMenu li:last-child { + margin-bottom: 10px; + } + .userMenu li:hover { + background: rgba(255, 255, 255, 0.1); + } + .userMenu li img { + padding: 0; + width: 35px; + height: 35px; + vertical-align: -6px; + margin-right: 10px; + filter: invert(0.9); + cursor: pointer; + } +` +const Notification = () => { + const activeLibrary = useSelector(getActiveLibrary) + const [settings, setSettings] = useSettings(activeLibrary) + const [showNotificationIcon, setShowNotificationIcon] = useState(true) + const [firstRun, setFirstRun] = useState(true) + const { + ref, + isComponentVisible, + setIsComponentVisible, + } = useComponentVisible(false) + const handleShowMenu = () => { + if (!isComponentVisible) { + setIsComponentVisible(true) + settingsRefetch() + } + } + const { data, refetch } = useQuery(GET_TASK_PROGRESS) + const { data: settingsData, refetch: settingsRefetch } = useQuery(GET_SETTINGS, { + variables: { libraryId: activeLibrary?.id }, + }) + const [settingUpdateStyle] = useMutation(SETTINGS_STYLE) + const [settingUpdateColor] = useMutation(SETTINGS_COLOR) + const [settingUpdateLocation] = useMutation(SETTINGS_LOCATION) + const [settingUpdateObject] = useMutation(SETTINGS_OBJECT) + + const getTitle = key => { + switch(key) { + case 'generateThumbnails': + return 'Generating thumbnails' + case 'processRaw': + return 'Processing raw files' + case 'classifyColor': + return 'Analysing colors' + case 'classifyObject': + return 'Analysing objects' + case 'classifyLocation': + return 'Analysing locations' + case 'classifyStyle': + return 'Analysing styles' + default: + return ''; + } + } + + const getKeys = (data) => { + let keys = Object.keys(data.taskProgress) + keys.splice(keys.length-1) + return keys + } + + useEffect(() => { + if (data && firstRun) { + getKeys(data).map(key => { + if (data.taskProgress[key]?.total > 0) + window.sessionStorage.setItem(key, data.taskProgress[key]?.total) + }) + setFirstRun(false) + } + }, [data, firstRun]) + + const refetchTasks = () => { + refetch() + if (data 
&& !firstRun) { + getKeys(data).map(key => { + const sessionVal = window.sessionStorage.getItem(key) + const remaining = data.taskProgress[key]?.remaining + if (remaining > sessionVal) { + window.sessionStorage.setItem(key, data.taskProgress[key]?.total) + } else if(remaining === 0) { + window.sessionStorage.setItem(key, 0) + } + }) + } + } + useEffect(() => { + let handle = setInterval(refetchTasks, 60000) + return () => { + clearInterval(handle) + } + }) + + const getNotificationKeys = (data) =>{ + const keys = getKeys(data) + const remaining = keys.filter(k => data.taskProgress[k].remaining > 0) + if (remaining.length) { + !showNotificationIcon && setShowNotificationIcon(true) + } else { + showNotificationIcon && setShowNotificationIcon(false) + } + return remaining + } + + const getProgressPercent = key => { + return ((window.sessionStorage.getItem(key) - data.taskProgress[key]?.remaining) / window.sessionStorage.getItem(key)) * 100 + } + const getSettingsKey = key => { + switch(key) { + case 'classifyObject': + return 'classificationObjectEnabled' + case 'classifyColor': + return 'classificationColorEnabled' + case 'classifyLocation': + return 'classificationLocationEnabled' + case 'classifyStyle': + return 'classificationStyleEnabled' + default: + return null + } + } + + const getSetting = key => { + switch(key) { + case 'classifyObject': + return settings.classificationObjectEnabled + case 'classifyColor': + return settings.classificationColorEnabled + case 'classifyLocation': + return settings.classificationLocationEnabled + case 'classifyStyle': + return settings.classificationStyleEnabled + default: + return '' + } + } + const toggleBooleanSetting = key => { + let newSettings = { ...settings } + newSettings[getSettingsKey(key)] = !settings[getSettingsKey(key)] + setSettings(newSettings) + switch (getSettingsKey(key)) { + case 'classificationStyleEnabled': + settingUpdateStyle({ + variables: { + classificationStyleEnabled: newSettings.classificationStyleEnabled, + libraryId: activeLibrary?.id, + }, + }).catch((e) => {}) + return key + case 'classificationLocationEnabled': + settingUpdateLocation({ + variables: { + classificationLocationEnabled: + newSettings.classificationLocationEnabled, + libraryId: activeLibrary?.id, + }, + }).catch((e) => {}) + return key + case 'classificationObjectEnabled': + settingUpdateObject({ + variables: { + classificationObjectEnabled: + newSettings.classificationObjectEnabled, + libraryId: activeLibrary?.id, + }, + }).catch((e) => {}) + return key + case 'classificationColorEnabled': + settingUpdateColor({ + variables: { + classificationColorEnabled: newSettings.classificationColorEnabled, + libraryId: activeLibrary?.id, + }, + }).catch((e) => {}) + return key + default: + return null + } + } + + return ( + <> + {showNotificationIcon ? + + Notification +
    + {data? + getNotificationKeys(data).map((key, index) => ( +
  • + + + + {getTitle(key)} + + {data.taskProgress[key]?.total-data.taskProgress[key]?.remaining}/{data.taskProgress[key]?.total} + + + + + + {key !== 'generateThumbnails' && key !== 'processRaw' ? + settings[getSettingsKey(key)] ? + toggleBooleanSetting(key)} /> + : + toggleBooleanSetting(key)} /> + : + null + } + + +
  • + )) + : null} +
+
+ : null } + + ) +} + +export default Notification diff --git a/ui/src/components/Settings.js b/ui/src/components/Settings.js index 89c8caa3..d3458d46 100644 --- a/ui/src/components/Settings.js +++ b/ui/src/components/Settings.js @@ -193,13 +193,14 @@ export default function Settings() { ) } -const useSettings = (activeLibrary) => { +export const useSettings = (activeLibrary) => { const [existingSettings, setSettings] = useState({}) const { loading, data, refetch } = useQuery(GET_SETTINGS, { variables: { libraryId: activeLibrary?.id }, }) + // console.log(error) const isInitialMount = useRef(true) - + useEffect(() => { refetch() }, [activeLibrary, refetch]) @@ -227,6 +228,5 @@ const useSettings = (activeLibrary) => { } setSettings(newSettings) } - return [existingSettings, setAndSaveSettings] } diff --git a/ui/src/components/User.js b/ui/src/components/User.js index 84cb25da..06826977 100644 --- a/ui/src/components/User.js +++ b/ui/src/components/User.js @@ -108,7 +108,7 @@ const Container = styled('div')` } ` -function useComponentVisible(initialIsVisible) { +export const useComponentVisible = (initialIsVisible) => { const [isComponentVisible, setIsComponentVisible] = useState(initialIsVisible) const ref = useRef(null) diff --git a/ui/src/graphql/settings.js b/ui/src/graphql/settings.js index 49193202..06d2debe 100644 --- a/ui/src/graphql/settings.js +++ b/ui/src/graphql/settings.js @@ -100,3 +100,15 @@ export const GET_SETTINGS = gql` } } ` +export const GET_TASK_PROGRESS = gql` + query TaskProgress { + taskProgress { + generateThumbnails + processRaw + classifyColor + classifyObject + classifyLocation + classifyStyle + } + } +` \ No newline at end of file diff --git a/ui/src/static/images/pause.svg b/ui/src/static/images/pause.svg new file mode 100644 index 00000000..914f25a7 --- /dev/null +++ b/ui/src/static/images/pause.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/ui/src/static/images/play.svg b/ui/src/static/images/play.svg new file mode 100644 index 00000000..1bb457c6 --- /dev/null +++ b/ui/src/static/images/play.svg @@ -0,0 +1 @@ + \ No newline at end of file From bb395615be34bbb98f4ee6280b0a13624d88b2d7 Mon Sep 17 00:00:00 2001 From: Gyan P Date: Thu, 15 Apr 2021 21:42:34 +0530 Subject: [PATCH 086/110] add media query for mobile device and handle show/hide menu --- ui/src/components/Header.js | 35 ++++++++++++++-- ui/src/components/Notification.js | 69 ++++++++++++++++--------------- ui/src/components/User.js | 39 +++++------------ 3 files changed, 77 insertions(+), 66 deletions(-) diff --git a/ui/src/components/Header.js b/ui/src/components/Header.js index eadb6225..688ac183 100644 --- a/ui/src/components/Header.js +++ b/ui/src/components/Header.js @@ -1,4 +1,4 @@ -import React from 'react' +import React, { useRef, useState, useEffect } from 'react' import { useSelector } from 'react-redux' import styled from '@emotion/styled' @@ -46,11 +46,38 @@ const Container = styled('div')` flex-grow: 1; } ` +export const useComponentVisible = (initialIsVisible, type) => { + const [isComponentVisible, setIsComponentVisible] = useState(initialIsVisible) + const ref = useRef(null) + + const handleHideDropdown = (event: KeyboardEvent) => { + if (event.key === 'Escape') { + setIsComponentVisible(false) + } + } + + const handleClickOutside = (event) => { + if (ref.current && !ref.current.contains(event.target)) { + setIsComponentVisible(false) + } + } + useEffect(() => { + document.addEventListener('keydown', handleHideDropdown, false) + document.addEventListener('click', 
handleClickOutside, false) + return () => { + document.removeEventListener('keydown', handleHideDropdown, true) + document.removeEventListener('click', handleClickOutside, true) + } + }) + + return { ref, isComponentVisible, setIsComponentVisible } +} const Header = ({ profile, libraries }) => { const isMobileApp = useSelector(getIsMobileApp) const safeArea = useSelector(getSafeArea) - + const [showNotification, setShowNotification] = useState(false) + const [showUserMenu, setShowUserMenu] = useState(false) return ( { Photonix
- - + + ) } diff --git a/ui/src/components/Notification.js b/ui/src/components/Notification.js index 49cf0755..7781ada6 100644 --- a/ui/src/components/Notification.js +++ b/ui/src/components/Notification.js @@ -1,4 +1,4 @@ -import React, { useRef, useState, useEffect } from 'react' +import React, { useState, useEffect } from 'react' import styled from '@emotion/styled' import { Progress, Box, Flex } from "@chakra-ui/core" import { useQuery, useMutation } from '@apollo/react-hooks' @@ -9,7 +9,7 @@ import play from '../static/images/play.svg' import pause from '../static/images/pause.svg' import { GET_TASK_PROGRESS } from '../graphql/settings' import { getActiveLibrary } from '../stores/libraries/selector' -import { useComponentVisible } from './User' +import { useComponentVisible } from './Header' import { useSettings } from './Settings' import { SETTINGS_STYLE, @@ -28,9 +28,9 @@ const Container = styled('div')` height: 50px; cursor: pointer; } - .userMenu { + .notificationMenu { position: absolute; - width: 290px; + width: 400px; right: 0px; top: 50px; z-index: 10; @@ -40,22 +40,23 @@ const Container = styled('div')` padding: 0; box-shadow: -3px 8px 17px rgba(0, 0, 0, 0.15); } - .isMobileApp header .userMenu { + .isMobileApp header .notificationMenu { top: 80px; } - .userMenu li { + .notificationMenu li { padding: 12px 15px 12px 15px; cursor: default; // display: flex; margin-bottom: 20px; + font-size: 16px; } - .userMenu li:last-child { + .notificationMenu li:last-child { margin-bottom: 10px; } - .userMenu li:hover { + .notificationMenu li:hover { background: rgba(255, 255, 255, 0.1); } - .userMenu li img { + .notificationMenu li img { padding: 0; width: 35px; height: 35px; @@ -64,8 +65,16 @@ const Container = styled('div')` filter: invert(0.9); cursor: pointer; } + @media(max-width:767px) { + .notificationMenu { + width: 290px + } + .notificationMenu li { + font-size: 13px; + } + } ` -const Notification = () => { +const Notification = (props) => { const activeLibrary = useSelector(getActiveLibrary) const [settings, setSettings] = useSettings(activeLibrary) const [showNotificationIcon, setShowNotificationIcon] = useState(true) @@ -75,21 +84,27 @@ const Notification = () => { isComponentVisible, setIsComponentVisible, } = useComponentVisible(false) + const { showNotification, setShowNotification, setShowUserMenu} = props const handleShowMenu = () => { - if (!isComponentVisible) { + if (!showNotification) { setIsComponentVisible(true) + setShowNotification(true) + setShowUserMenu(false) settingsRefetch() } } const { data, refetch } = useQuery(GET_TASK_PROGRESS) - const { data: settingsData, refetch: settingsRefetch } = useQuery(GET_SETTINGS, { + const { refetch: settingsRefetch } = useQuery(GET_SETTINGS, { variables: { libraryId: activeLibrary?.id }, }) const [settingUpdateStyle] = useMutation(SETTINGS_STYLE) const [settingUpdateColor] = useMutation(SETTINGS_COLOR) const [settingUpdateLocation] = useMutation(SETTINGS_LOCATION) const [settingUpdateObject] = useMutation(SETTINGS_OBJECT) - + useEffect(() => { + if (!isComponentVisible) + setShowNotification(false) + }, [isComponentVisible, setShowNotification]) const getTitle = key => { switch(key) { case 'generateThumbnails': @@ -120,6 +135,7 @@ const Notification = () => { getKeys(data).map(key => { if (data.taskProgress[key]?.total > 0) window.sessionStorage.setItem(key, data.taskProgress[key]?.total) + return key }) setFirstRun(false) } @@ -136,6 +152,7 @@ const Notification = () => { } else if(remaining === 0) { 
window.sessionStorage.setItem(key, 0) } + return key }) } } @@ -175,20 +192,6 @@ const Notification = () => { } } - const getSetting = key => { - switch(key) { - case 'classifyObject': - return settings.classificationObjectEnabled - case 'classifyColor': - return settings.classificationColorEnabled - case 'classifyLocation': - return settings.classificationLocationEnabled - case 'classifyStyle': - return settings.classificationStyleEnabled - default: - return '' - } - } const toggleBooleanSetting = key => { let newSettings = { ...settings } newSettings[getSettingsKey(key)] = !settings[getSettingsKey(key)] @@ -239,8 +242,8 @@ const Notification = () => { Notification
    {data? getNotificationKeys(data).map((key, index) => ( @@ -248,8 +251,8 @@ const Notification = () => { - {getTitle(key)} - + {getTitle(key)} + {data.taskProgress[key]?.total-data.taskProgress[key]?.remaining}/{data.taskProgress[key]?.total} @@ -258,9 +261,9 @@ const Notification = () => { {key !== 'generateThumbnails' && key !== 'processRaw' ? settings[getSettingsKey(key)] ? - toggleBooleanSetting(key)} /> + toggleBooleanSetting(key)} alt="pause" /> : - toggleBooleanSetting(key)} /> + toggleBooleanSetting(key)} alt="play" /> : null } diff --git a/ui/src/components/User.js b/ui/src/components/User.js index 06826977..e969d88a 100644 --- a/ui/src/components/User.js +++ b/ui/src/components/User.js @@ -1,4 +1,4 @@ -import React, { useRef, useState, useEffect } from 'react' +import React, { useEffect } from 'react' import { Link } from 'react-router-dom' import { useDispatch, useSelector } from 'react-redux' import PropTypes from 'prop-types' @@ -10,6 +10,7 @@ import arrowDown from '../static/images/arrow_down.svg' import library from '../static/images/library.svg' import settings from '../static/images/settings.svg' import logout from '../static/images/logout.svg' +import { useComponentVisible } from './Header' const Container = styled('div')` width: 84px; @@ -108,33 +109,7 @@ const Container = styled('div')` } ` -export const useComponentVisible = (initialIsVisible) => { - const [isComponentVisible, setIsComponentVisible] = useState(initialIsVisible) - const ref = useRef(null) - - const handleHideDropdown = (event: KeyboardEvent) => { - if (event.key === 'Escape') { - setIsComponentVisible(false) - } - } - - const handleClickOutside = (event) => { - if (ref.current && !ref.current.contains(event.target)) { - setIsComponentVisible(false) - } - } - useEffect(() => { - document.addEventListener('keydown', handleHideDropdown, false) - document.addEventListener('click', handleClickOutside, false) - return () => { - document.removeEventListener('keydown', handleHideDropdown, true) - document.removeEventListener('click', handleClickOutside, true) - } - }) - - return { ref, isComponentVisible, setIsComponentVisible } -} -const User = ({ profile, libraries }) => { +const User = ({ profile, libraries, showUserMenu ,setShowUserMenu, setShowNotification }) => { const dispatch = useDispatch() const activeLibrary = useSelector(getActiveLibrary) const { @@ -155,14 +130,20 @@ const User = ({ profile, libraries }) => { } const handleShowMenu = () => { setIsComponentVisible(true) + setShowUserMenu(true) + setShowNotification(false) } + useEffect(() => { + if (!isComponentVisible) + setShowUserMenu(false) + }, [isComponentVisible, setShowUserMenu]) return ( User account
      {profile ? ( From b3eba054ba04cda600dc3a49994f0154c7298ee5 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Mon, 3 May 2021 16:04:24 +0100 Subject: [PATCH 087/110] Upgrades for Apollo v3 --- ui/src/components/Notification.js | 2 +- ui/src/components/Settings.js | 53 ++++++++++++++++++++----------- 2 files changed, 36 insertions(+), 19 deletions(-) diff --git a/ui/src/components/Notification.js b/ui/src/components/Notification.js index 7781ada6..dbf97e18 100644 --- a/ui/src/components/Notification.js +++ b/ui/src/components/Notification.js @@ -1,7 +1,7 @@ import React, { useState, useEffect } from 'react' import styled from '@emotion/styled' import { Progress, Box, Flex } from "@chakra-ui/core" -import { useQuery, useMutation } from '@apollo/react-hooks' +import { useQuery, useMutation } from '@apollo/client' import { useSelector } from 'react-redux' import notifications from '../static/images/notifications.svg' diff --git a/ui/src/components/Settings.js b/ui/src/components/Settings.js index d3458d46..5f47cdb0 100644 --- a/ui/src/components/Settings.js +++ b/ui/src/components/Settings.js @@ -201,31 +201,48 @@ export const useSettings = (activeLibrary) => { // console.log(error) const isInitialMount = useRef(true) + // useEffect(() => { + // refetch() + // }, [activeLibrary, refetch]) + useEffect(() => { - refetch() - }, [activeLibrary, refetch]) + if (activeLibrary && !loading) { + refetch() + } + }, [activeLibrary, loading, refetch]) useEffect(() => { - if (isInitialMount.current) { - isInitialMount.current = false - } else { - if (!loading && data) { - let setting = { ...data.librarySetting.library } - setting.sourceDirs = data.librarySetting.sourceFolder - setSettings(setting) + // if (isInitialMount.current) { + // isInitialMount.current = false + // } else { + if (!loading && data) { + let setting = { ...data.librarySetting.library } + setting.sourceDirs = data.librarySetting.sourceFolder + setSettings(setting) } - } - // TODO: Re-sync with desktop app - // if (window.sendSyncToElectron) { - // let result = window.sendSyncToElectron('get-settings') - // setSettings(result) - // } }, [data, loading]) + + // useEffect(() => { + // if (activeLibrary) { + // refetch() + // } + // if (!loading) { + // let setting = {...data.librarySetting.library} + // setting.sourceDirs = data.librarySetting.sourceFolder + // setSettings(setting) + // } + // if (window.sendSyncToElectron) { + // let result = window.sendSyncToElectron('get-settings') + // setSettings(result) + // } + // }, [activeLibrary, loading, refetch, data]) + + function setAndSaveSettings(newSettings) { - if (window.sendSyncToElectron) { - window.sendSyncToElectron('set-settings', newSettings) - } + // if (window.sendSyncToElectron) { + // window.sendSyncToElectron('set-settings', newSettings) + // } setSettings(newSettings) } return [existingSettings, setAndSaveSettings] From 10cbd9a974e14bf7e50de8a56835629f9e01c483 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Mon, 3 May 2021 19:34:16 +0100 Subject: [PATCH 088/110] Modifications around UI --- ui/src/components/Notification.js | 188 +++++++++++++++++------------- ui/src/components/Settings.js | 76 ++++++------ 2 files changed, 146 insertions(+), 118 deletions(-) diff --git a/ui/src/components/Notification.js b/ui/src/components/Notification.js index dbf97e18..c3ad7400 100644 --- a/ui/src/components/Notification.js +++ b/ui/src/components/Notification.js @@ -1,6 +1,6 @@ import React, { useState, useEffect } from 'react' import styled from '@emotion/styled' -import { 
Progress, Box, Flex } from "@chakra-ui/core" +import { Progress, Box, Flex } from '@chakra-ui/core' import { useQuery, useMutation } from '@apollo/client' import { useSelector } from 'react-redux' @@ -16,12 +16,12 @@ import { SETTINGS_COLOR, SETTINGS_LOCATION, SETTINGS_OBJECT, - GET_SETTINGS + GET_SETTINGS, } from '../graphql/settings' const Container = styled('div')` - margin-right:10px; - > img { + margin-right: 10px; + > img { filter: invert(0.9); padding: 10px 0 10px 10px; width: 50px; @@ -65,9 +65,9 @@ const Container = styled('div')` filter: invert(0.9); cursor: pointer; } - @media(max-width:767px) { + @media (max-width: 767px) { .notificationMenu { - width: 290px + width: 290px; } .notificationMenu li { font-size: 13px; @@ -78,13 +78,12 @@ const Notification = (props) => { const activeLibrary = useSelector(getActiveLibrary) const [settings, setSettings] = useSettings(activeLibrary) const [showNotificationIcon, setShowNotificationIcon] = useState(true) - const [firstRun, setFirstRun] = useState(true) const { ref, isComponentVisible, setIsComponentVisible, } = useComponentVisible(false) - const { showNotification, setShowNotification, setShowUserMenu} = props + const { showNotification, setShowNotification, setShowUserMenu } = props const handleShowMenu = () => { if (!showNotification) { setIsComponentVisible(true) @@ -102,11 +101,10 @@ const Notification = (props) => { const [settingUpdateLocation] = useMutation(SETTINGS_LOCATION) const [settingUpdateObject] = useMutation(SETTINGS_OBJECT) useEffect(() => { - if (!isComponentVisible) - setShowNotification(false) + if (!isComponentVisible) setShowNotification(false) }, [isComponentVisible, setShowNotification]) - const getTitle = key => { - switch(key) { + const getTitle = (key) => { + switch (key) { case 'generateThumbnails': return 'Generating thumbnails' case 'processRaw': @@ -120,65 +118,74 @@ const Notification = (props) => { case 'classifyStyle': return 'Analysing styles' default: - return ''; + return '' } } - + const getKeys = (data) => { let keys = Object.keys(data.taskProgress) - keys.splice(keys.length-1) + keys.splice(keys.length - 1) return keys } - + useEffect(() => { - if (data && firstRun) { - getKeys(data).map(key => { - if (data.taskProgress[key]?.total > 0) - window.sessionStorage.setItem(key, data.taskProgress[key]?.total) - return key - }) - setFirstRun(false) - } - }, [data, firstRun]) - - const refetchTasks = () => { - refetch() - if (data && !firstRun) { - getKeys(data).map(key => { - const sessionVal = window.sessionStorage.getItem(key) - const remaining = data.taskProgress[key]?.remaining - if (remaining > sessionVal) { - window.sessionStorage.setItem(key, data.taskProgress[key]?.total) - } else if(remaining === 0) { + if (data) { + console.log(data) + getKeys(data).map((key) => { + let remaining = data.taskProgress[key]?.remaining + if (remaining === 0) { window.sessionStorage.setItem(key, 0) + } else if (remaining > window.sessionStorage.getItem(key)) { + window.sessionStorage.setItem(key, remaining) } return key }) } - } + }, [data]) + + // const refetchTasks = () => { + // refetch() + // if (data) { + // getKeys(data).map((key) => { + // const sessionVal = window.sessionStorage.getItem(key) + // const remaining = data.taskProgress[key]?.remaining + // if (remaining > sessionVal) { + // window.sessionStorage.setItem(key, data.taskProgress[key]?.total) + // } else if (remaining === 0) { + // window.sessionStorage.setItem(key, 0) + // } + // return key + // }) + // } + // } useEffect(() => { - let handle = 
setInterval(refetchTasks, 60000) + let handle = setInterval(refetch, 15000) return () => { clearInterval(handle) } }) - const getNotificationKeys = (data) =>{ + const getNotificationKeys = (data) => { const keys = getKeys(data) - const remaining = keys.filter(k => data.taskProgress[k].remaining > 0) + const remaining = keys.filter((k) => data.taskProgress[k].remaining > 0) if (remaining.length) { !showNotificationIcon && setShowNotificationIcon(true) - } else { + } else { showNotificationIcon && setShowNotificationIcon(false) - } + } return remaining } - - const getProgressPercent = key => { - return ((window.sessionStorage.getItem(key) - data.taskProgress[key]?.remaining) / window.sessionStorage.getItem(key)) * 100 + + const getProgressPercent = (key) => { + return ( + ((window.sessionStorage.getItem(key) - + data.taskProgress[key]?.remaining) / + window.sessionStorage.getItem(key)) * + 100 + ) } - const getSettingsKey = key => { - switch(key) { + const getSettingsKey = (key) => { + switch (key) { case 'classifyObject': return 'classificationObjectEnabled' case 'classifyColor': @@ -192,7 +199,7 @@ const Notification = (props) => { } } - const toggleBooleanSetting = key => { + const toggleBooleanSetting = (key) => { let newSettings = { ...settings } newSettings[getSettingsKey(key)] = !settings[getSettingsKey(key)] setSettings(newSettings) @@ -238,43 +245,62 @@ const Notification = (props) => { return ( <> - {showNotificationIcon ? - - Notification -
-        {data?
-          getNotificationKeys(data).map((key, index) => (
-
-
-
-                {getTitle(key)}
-
-                {data.taskProgress[key]?.total-data.taskProgress[key]?.remaining}/{data.taskProgress[key]?.total}
+          {data
+            ? getNotificationKeys(data).map((key, index) => (
+
+
+
+                  {getTitle(key)}
+
+                    {window.sessionStorage.getItem(key) -
+                      data.taskProgress[key]?.remaining}
+                    /{window.sessionStorage.getItem(key)}
+
+
+
+
+
+                  {key !== 'generateThumbnails' &&
+                  key !== 'processRaw' ? (
+                    settings[getSettingsKey(key)] ? (
+                      toggleBooleanSetting(key)}
+                        alt="pause"
+                      />
+                    ) : (
+                      toggleBooleanSetting(key)}
+                        alt="play"
+                      />
+                    )
+                  ) : null}
-
-
-
-                {getTitle(key)}
-
-                {data.taskProgress[key]?.total-data.taskProgress[key]?.remaining}/{data.taskProgress[key]?.total}
-
-
-
-                {key !== 'generateThumbnails' && key !== 'processRaw' ?
-                  settings[getSettingsKey(key)] ?
-                    toggleBooleanSetting(key)} alt="pause" />
-                  :
-                    toggleBooleanSetting(key)} alt="play" />
-                :
-                  null
-                }
-
-
-            ))
-          : null}
-
-
-      : null }
+            ))
+          : null}
+
+
      + ) : null} ) } diff --git a/ui/src/components/Settings.js b/ui/src/components/Settings.js index 5f47cdb0..60b1c964 100644 --- a/ui/src/components/Settings.js +++ b/ui/src/components/Settings.js @@ -1,4 +1,4 @@ -import React, { useState, useEffect, useRef } from 'react' +import React, { useState, useEffect } from 'react' import { useQuery, useMutation } from '@apollo/client' import { useSelector } from 'react-redux' import { getActiveLibrary } from '../stores/libraries/selector' @@ -8,9 +8,9 @@ import { Flex, Stack, FormLabel, - Input, - InputGroup, - IconButton, + // Input, + // InputGroup, + // IconButton, } from '@chakra-ui/core' import Modal from './Modal' @@ -20,7 +20,7 @@ import { SETTINGS_LOCATION, SETTINGS_OBJECT, SETTINGS_FACE, - SETTINGS_SOURCE_FOLDER, + // SETTINGS_SOURCE_FOLDER, GET_SETTINGS, } from '../graphql/settings' // import folder from '../static/images/folder.svg' @@ -119,30 +119,32 @@ export default function Settings() { } } - function onSelectSourceDir() { - if (window.sendSyncToElectron) { - let dirs = window.sendSyncToElectron('select-dir') - setSettings({ sourceDirs: dirs }) - } - } + // TODO: Re-implement desktop app settings integration + // function onSelectSourceDir() { + // if (window.sendSyncToElectron) { + // let dirs = window.sendSyncToElectron('select-dir') + // setSettings({ sourceDirs: dirs }) + // } + // } + + // function onChangeSourceDir(e) { + // let newSettings = { ...settings } + // newSettings.sourceDirs = e.currentTarget.value + // setSettings(newSettings) + // settingUpdateSourceFolder({ + // variables: { + // sourceFolder: newSettings.sourceDirs, + // libraryId: activeLibrary?.id, + // }, + // }).catch((e) => {}) + // } - function onChangeSourceDir(e) { - let newSettings = { ...settings } - newSettings.sourceDirs = e.currentTarget.value - setSettings(newSettings) - settingUpdateSourceFolder({ - variables: { - sourceFolder: newSettings.sourceDirs, - libraryId: activeLibrary?.id, - }, - }).catch((e) => {}) - } const [settingUpdateStyle] = useMutation(SETTINGS_STYLE) const [settingUpdateColor] = useMutation(SETTINGS_COLOR) const [settingUpdateLocation] = useMutation(SETTINGS_LOCATION) const [settingUpdateObject] = useMutation(SETTINGS_OBJECT) const [settingUpdateFace] = useMutation(SETTINGS_FACE) - const [settingUpdateSourceFolder] = useMutation(SETTINGS_SOURCE_FOLDER) + // const [settingUpdateSourceFolder] = useMutation(SETTINGS_SOURCE_FOLDER) return ( @@ -154,20 +156,20 @@ export default function Settings() { if (settings) { if (item.type === 'path') { - field = ( - - - - - ) + // field = ( + // + // + // + // + // ) } else if (item.type === 'boolean') { field = ( Date: Thu, 15 Jul 2021 18:25:51 +0530 Subject: [PATCH 089/110] resolved error 'useRef not defined' after resolving conflicts --- ui/src/components/Settings.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ui/src/components/Settings.js b/ui/src/components/Settings.js index 60b1c964..332d0c48 100644 --- a/ui/src/components/Settings.js +++ b/ui/src/components/Settings.js @@ -1,4 +1,4 @@ -import React, { useState, useEffect } from 'react' +import React, { useState, useEffect, useRef } from 'react' import { useQuery, useMutation } from '@apollo/client' import { useSelector } from 'react-redux' import { getActiveLibrary } from '../stores/libraries/selector' From 4b03b229ea2de1adff526b24ba1ed0606da2416a Mon Sep 17 00:00:00 2001 From: GyanP Date: Fri, 16 Jul 2021 16:36:17 +0530 Subject: [PATCH 090/110] task done and changes pushed on branch 192-progress-notifications 
--- photonix/photos/schema.py | 4 +++- ui/src/components/BoundingBoxes.js | 4 ++-- ui/src/components/Notification.js | 18 ++++++++++++++++-- ui/src/graphql/settings.js | 1 + 4 files changed, 22 insertions(+), 5 deletions(-) diff --git a/photonix/photos/schema.py b/photonix/photos/schema.py index 81121f7b..2e06dd51 100644 --- a/photonix/photos/schema.py +++ b/photonix/photos/schema.py @@ -219,6 +219,7 @@ class TaskType(graphene.ObjectType): classify_location = graphene.types.generic.GenericScalar() classify_object = graphene.types.generic.GenericScalar() classify_style = graphene.types.generic.GenericScalar() + classify_face = graphene.types.generic.GenericScalar() class Query(graphene.ObjectType): @@ -474,7 +475,8 @@ def resolve_task_progress(self, info, **kwargs): "classify_color": count_remaining_task('classify.color'), "classify_location": count_remaining_task('classify.location'), "classify_object": count_remaining_task('classify.object'), - "classify_style": count_remaining_task('classify.style')} + "classify_style": count_remaining_task('classify.style'), + "classify_face": count_remaining_task('classify.face')} class LibraryInput(graphene.InputObjectType): diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index a2ea1356..53a81bac 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -160,7 +160,7 @@ const BoundingBoxes = ({ } const onChangeLable = (event, photoTagId) => { - ;(event.keyCode === ENTER_KEY && + (event.keyCode === ENTER_KEY && ref.current.value && onSaveLable(event, photoTagId)) || (event.keyCode === ESCAPE_KEY && setEditLableId('')) @@ -201,7 +201,7 @@ const BoundingBoxes = ({ let top = (box.positionY - box.sizeY / 2) * 100 + '%' let width = box.sizeX * 100 + '%' let height = box.sizeY * 100 + '%' - console.log(box) + // console.log(box) return (
      { const [settingUpdateColor] = useMutation(SETTINGS_COLOR) const [settingUpdateLocation] = useMutation(SETTINGS_LOCATION) const [settingUpdateObject] = useMutation(SETTINGS_OBJECT) + const [settingUpdateFace] = useMutation(SETTINGS_FACE) useEffect(() => { if (!isComponentVisible) setShowNotification(false) }, [isComponentVisible, setShowNotification]) @@ -117,6 +119,8 @@ const Notification = (props) => { return 'Analysing locations' case 'classifyStyle': return 'Analysing styles' + case 'classifyFace': + return 'Analysing faces' default: return '' } @@ -124,13 +128,13 @@ const Notification = (props) => { const getKeys = (data) => { let keys = Object.keys(data.taskProgress) - keys.splice(keys.length - 1) + // keys.splice(keys.length - 1) return keys } useEffect(() => { if (data) { - console.log(data) + // console.log(data) getKeys(data).map((key) => { let remaining = data.taskProgress[key]?.remaining if (remaining === 0) { @@ -194,6 +198,8 @@ const Notification = (props) => { return 'classificationLocationEnabled' case 'classifyStyle': return 'classificationStyleEnabled' + case 'classifyFace': + return 'classificationFaceEnabled' default: return null } @@ -238,6 +244,14 @@ const Notification = (props) => { }, }).catch((e) => {}) return key + case 'classificationFaceEnabled': + settingUpdateFace({ + variables: { + classificationFaceEnabled:newSettings.classificationFaceEnabled, + libraryId: activeLibrary?.id, + }, + }).catch((e) => {}) + return key default: return null } diff --git a/ui/src/graphql/settings.js b/ui/src/graphql/settings.js index 06d2debe..516e5b4e 100644 --- a/ui/src/graphql/settings.js +++ b/ui/src/graphql/settings.js @@ -109,6 +109,7 @@ export const GET_TASK_PROGRESS = gql` classifyObject classifyLocation classifyStyle + classifyFace } } ` \ No newline at end of file From b4528b70c3b23a1526f6f27f029afc59d6779af0 Mon Sep 17 00:00:00 2001 From: GyanP Date: Mon, 19 Jul 2021 23:56:05 +0530 Subject: [PATCH 091/110] task changes done and pushed --- ui/src/components/Notification.js | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/ui/src/components/Notification.js b/ui/src/components/Notification.js index e56c36f3..770d2fe8 100644 --- a/ui/src/components/Notification.js +++ b/ui/src/components/Notification.js @@ -257,6 +257,16 @@ const Notification = (props) => { } } + const getRemaining = (remaining, totalRunning) => { + return remaining === '0' + ? '1' + : Math.abs(parseInt(remaining) - parseInt(totalRunning)) + } + + const getTotalRunning = (remaining, totalRunning) => { + return totalRunning === '0' ? remaining : totalRunning + } + return ( <> {showNotificationIcon ? 
( @@ -278,9 +288,8 @@ const Notification = (props) => { {getTitle(key)} - {window.sessionStorage.getItem(key) - - data.taskProgress[key]?.remaining} - /{window.sessionStorage.getItem(key)} + {getRemaining(data.taskProgress[key]?.remaining, window.sessionStorage.getItem(key))} + /{getTotalRunning(data.taskProgress[key]?.remaining, window.sessionStorage.getItem(key))} Date: Tue, 20 Jul 2021 21:21:22 +0530 Subject: [PATCH 092/110] task completed --- photonix/photos/utils/db.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/photonix/photos/utils/db.py b/photonix/photos/utils/db.py index 223685db..34af7310 100644 --- a/photonix/photos/utils/db.py +++ b/photonix/photos/utils/db.py @@ -2,7 +2,7 @@ from decimal import Decimal import imghdr import mimetypes -import os +import os, time import re import subprocess @@ -55,11 +55,13 @@ def record_photo(path, library, inotify_event_type=None): metadata = PhotoMetadata(path) date_taken = None - possible_date_keys = ['Date/Time Original', 'Date Time Original', 'Date/Time', 'Date Time', 'GPS Date/Time', 'Modify Date', 'File Modification Date/Time'] + possible_date_keys = ['Create Date', 'Date/Time Original', 'Date Time Original', 'Date/Time', 'Date Time', 'GPS Date/Time', 'File Modification Date/Time'] for date_key in possible_date_keys: date_taken = parse_datetime(metadata.get(date_key)) if date_taken: break + # If EXIF data not found. + date_taken = date_taken or datetime.strptime(time.ctime(os.path.getctime(path)), "%a %b %d %H:%M:%S %Y") camera = None camera_make = metadata.get('Make', '')[:Camera.make.field.max_length] From 784dc92cf3d7cea7b9bfb146abc1f5f9d33da6f7 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Thu, 22 Jul 2021 20:55:23 +0100 Subject: [PATCH 093/110] Tidy-ups to progress bar display --- ui/src/components/BoundingBoxes.js | 2 +- ui/src/components/Notification.js | 104 ++++++++++++-------------- ui/src/components/Settings.js | 10 +-- ui/src/containers/FiltersContainer.js | 1 - 4 files changed, 52 insertions(+), 65 deletions(-) diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index 53a81bac..76eb9f33 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -1,4 +1,4 @@ -import React, { useState, useEffect, useRef } from 'react' +import React, { useEffect, useRef } from 'react' import styled from '@emotion/styled' import { useMutation } from '@apollo/client' import { useDispatch, useSelector } from 'react-redux' diff --git a/ui/src/components/Notification.js b/ui/src/components/Notification.js index 770d2fe8..e20841f6 100644 --- a/ui/src/components/Notification.js +++ b/ui/src/components/Notification.js @@ -47,13 +47,8 @@ const Container = styled('div')` .notificationMenu li { padding: 12px 15px 12px 15px; cursor: default; - // display: flex; - margin-bottom: 20px; font-size: 16px; } - .notificationMenu li:last-child { - margin-bottom: 10px; - } .notificationMenu li:hover { background: rgba(255, 255, 255, 0.1); } @@ -78,13 +73,11 @@ const Container = styled('div')` const Notification = (props) => { const activeLibrary = useSelector(getActiveLibrary) const [settings, setSettings] = useSettings(activeLibrary) - const [showNotificationIcon, setShowNotificationIcon] = useState(true) - const { - ref, - isComponentVisible, - setIsComponentVisible, - } = useComponentVisible(false) + const [showNotificationIcon, setShowNotificationIcon] = useState(false) + const { ref, isComponentVisible, setIsComponentVisible } = + useComponentVisible(false) const { 
showNotification, setShowNotification, setShowUserMenu } = props + const handleShowMenu = () => { if (!showNotification) { setIsComponentVisible(true) @@ -93,6 +86,7 @@ const Notification = (props) => { settingsRefetch() } } + const { data, refetch } = useQuery(GET_TASK_PROGRESS) const { refetch: settingsRefetch } = useQuery(GET_SETTINGS, { variables: { libraryId: activeLibrary?.id }, @@ -102,9 +96,34 @@ const Notification = (props) => { const [settingUpdateLocation] = useMutation(SETTINGS_LOCATION) const [settingUpdateObject] = useMutation(SETTINGS_OBJECT) const [settingUpdateFace] = useMutation(SETTINGS_FACE) + + useEffect(() => { + const interval = isComponentVisible ? 3000 : 15000 + let handle = setInterval(refetch, interval) + return () => { + clearInterval(handle) + } + }) + useEffect(() => { if (!isComponentVisible) setShowNotification(false) }, [isComponentVisible, setShowNotification]) + + useEffect(() => { + if (data) { + getKeys(data).map((key) => { + let remaining = data.taskProgress[key]?.remaining + if (remaining === 0) { + window.sessionStorage.setItem(key, 0) + } else if (remaining > window.sessionStorage.getItem(key)) { + window.sessionStorage.setItem(key, remaining) + !showNotificationIcon && setShowNotificationIcon(true) + } + return key + }) + } + }, [data, showNotificationIcon]) + const getTitle = (key) => { switch (key) { case 'generateThumbnails': @@ -112,15 +131,15 @@ const Notification = (props) => { case 'processRaw': return 'Processing raw files' case 'classifyColor': - return 'Analysing colors' + return 'Analyzing colors' case 'classifyObject': - return 'Analysing objects' + return 'Analyzing objects' case 'classifyLocation': - return 'Analysing locations' + return 'Analyzing locations' case 'classifyStyle': - return 'Analysing styles' + return 'Analyzing styles' case 'classifyFace': - return 'Analysing faces' + return 'Analyzing faces' default: return '' } @@ -128,47 +147,9 @@ const Notification = (props) => { const getKeys = (data) => { let keys = Object.keys(data.taskProgress) - // keys.splice(keys.length - 1) return keys } - useEffect(() => { - if (data) { - // console.log(data) - getKeys(data).map((key) => { - let remaining = data.taskProgress[key]?.remaining - if (remaining === 0) { - window.sessionStorage.setItem(key, 0) - } else if (remaining > window.sessionStorage.getItem(key)) { - window.sessionStorage.setItem(key, remaining) - } - return key - }) - } - }, [data]) - - // const refetchTasks = () => { - // refetch() - // if (data) { - // getKeys(data).map((key) => { - // const sessionVal = window.sessionStorage.getItem(key) - // const remaining = data.taskProgress[key]?.remaining - // if (remaining > sessionVal) { - // window.sessionStorage.setItem(key, data.taskProgress[key]?.total) - // } else if (remaining === 0) { - // window.sessionStorage.setItem(key, 0) - // } - // return key - // }) - // } - // } - useEffect(() => { - let handle = setInterval(refetch, 15000) - return () => { - clearInterval(handle) - } - }) - const getNotificationKeys = (data) => { const keys = getKeys(data) const remaining = keys.filter((k) => data.taskProgress[k].remaining > 0) @@ -176,6 +157,7 @@ const Notification = (props) => { !showNotificationIcon && setShowNotificationIcon(true) } else { showNotificationIcon && setShowNotificationIcon(false) + isComponentVisible && setIsComponentVisible(false) } return remaining } @@ -188,6 +170,7 @@ const Notification = (props) => { 100 ) } + const getSettingsKey = (key) => { switch (key) { case 'classifyObject': @@ -247,7 +230,7 @@ 
const Notification = (props) => { case 'classificationFaceEnabled': settingUpdateFace({ variables: { - classificationFaceEnabled:newSettings.classificationFaceEnabled, + classificationFaceEnabled: newSettings.classificationFaceEnabled, libraryId: activeLibrary?.id, }, }).catch((e) => {}) @@ -288,8 +271,15 @@ const Notification = (props) => { {getTitle(key)} - {getRemaining(data.taskProgress[key]?.remaining, window.sessionStorage.getItem(key))} - /{getTotalRunning(data.taskProgress[key]?.remaining, window.sessionStorage.getItem(key))} + {getRemaining( + data.taskProgress[key]?.remaining, + window.sessionStorage.getItem(key) + )} + / + {getTotalRunning( + data.taskProgress[key]?.remaining, + window.sessionStorage.getItem(key) + )} { variables: { libraryId: activeLibrary?.id }, }) // console.log(error) - const isInitialMount = useRef(true) - + // const isInitialMount = useRef(true) + // useEffect(() => { // refetch() // }, [activeLibrary, refetch]) @@ -221,10 +221,9 @@ export const useSettings = (activeLibrary) => { let setting = { ...data.librarySetting.library } setting.sourceDirs = data.librarySetting.sourceFolder setSettings(setting) - } + } }, [data, loading]) - // useEffect(() => { // if (activeLibrary) { // refetch() @@ -240,7 +239,6 @@ export const useSettings = (activeLibrary) => { // } // }, [activeLibrary, loading, refetch, data]) - function setAndSaveSettings(newSettings) { // if (window.sendSyncToElectron) { // window.sendSyncToElectron('set-settings', newSettings) diff --git a/ui/src/containers/FiltersContainer.js b/ui/src/containers/FiltersContainer.js index a27bb4fc..05d1e9e5 100644 --- a/ui/src/containers/FiltersContainer.js +++ b/ui/src/containers/FiltersContainer.js @@ -118,7 +118,6 @@ const FiltersContainer = ({ useEffect(() => { if (isFiltersAvail && filterData.length) { - console.log(REMOVABLE_TAGS) const autoSuggestionFilters = filterData.filter((f) => { return REMOVABLE_TAGS.indexOf(f.name) === -1 }) From 4b9ef8e577bc5955a716c023337938bbb36d960e Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Thu, 22 Jul 2021 21:06:21 +0100 Subject: [PATCH 094/110] Tidy-ups to progress bar display --- photonix/photos/utils/tasks.py | 1 - 1 file changed, 1 deletion(-) diff --git a/photonix/photos/utils/tasks.py b/photonix/photos/utils/tasks.py index 11489fe6..2f1c10bf 100644 --- a/photonix/photos/utils/tasks.py +++ b/photonix/photos/utils/tasks.py @@ -16,5 +16,4 @@ def requeue_stuck_tasks(task_type, age_hours=24, max_num=8): def count_remaining_task(task_type): """Returned count of remaining task.""" return { - 'total': Task.objects.filter(type=task_type).count(), 'remaining': Task.objects.filter(Q(type=task_type), Q(status='P') | Q(status='S')).count()} From 3d2770434762f0596f2de32e7af29fc99c03bcd7 Mon Sep 17 00:00:00 2001 From: Fred Hoogduin Date: Thu, 15 Jul 2021 16:45:19 +0200 Subject: [PATCH 095/110] Update metadata.py never crash on random strings in exif date fields --- photonix/photos/utils/metadata.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/photonix/photos/utils/metadata.py b/photonix/photos/utils/metadata.py index 8fa14e5c..ccaaa1b3 100644 --- a/photonix/photos/utils/metadata.py +++ b/photonix/photos/utils/metadata.py @@ -43,10 +43,13 @@ def parse_datetime(date_str): try: return datetime.strptime(date_str, '%Y:%m:%d %H:%M:%S').replace(tzinfo=utc) except ValueError: - parsed_date = parse_date(date_str) - if not parsed_date.tzinfo: - parsed_date = parsed_date.replace(tzinfo=utc) - return parsed_date + try: + parsed_date = 
parse_date(date_str) + if not parsed_date.tzinfo: + parsed_date = parsed_date.replace(tzinfo=utc) + return parsed_date + except ValueError: + return None def parse_gps_location(gps_str): From 14d44346516ae684dc51c2f52a8f7dedb3714269 Mon Sep 17 00:00:00 2001 From: Fred Hoogduin Date: Mon, 26 Jul 2021 17:24:33 +0200 Subject: [PATCH 096/110] never crash when images contain identical subject tags (type Generic) --- photonix/photos/utils/db.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/photonix/photos/utils/db.py b/photonix/photos/utils/db.py index 223685db..2ac511ae 100644 --- a/photonix/photos/utils/db.py +++ b/photonix/photos/utils/db.py @@ -152,7 +152,7 @@ def record_photo(path, library, inotify_event_type=None): for subject in metadata.get('Subject', '').split(','): subject = subject.strip() if subject: - tag = Tag.objects.create(library_id=library_id, name=subject, type="G") + tag, _ = Tag.objects.get_or_create(library_id=library_id, name=subject, type="G") PhotoTag.objects.create( photo=photo, tag=tag, From 80190823b392f38d16d493b243589dfd722f448c Mon Sep 17 00:00:00 2001 From: Fred Hoogduin Date: Mon, 26 Jul 2021 17:28:03 +0200 Subject: [PATCH 097/110] decode bytes result from exiftool while ignoring (skipping) invalid utf-8 sequences --- photonix/photos/utils/metadata.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/photonix/photos/utils/metadata.py b/photonix/photos/utils/metadata.py index 8fa14e5c..143c49cb 100644 --- a/photonix/photos/utils/metadata.py +++ b/photonix/photos/utils/metadata.py @@ -13,7 +13,7 @@ def __init__(self, path): self.data = {} try: # exiftool produces data such as MIME Type for non-photos too - result = Popen(['exiftool', path], stdout=PIPE, stdin=PIPE, stderr=PIPE).communicate()[0].decode('utf-8') + result = Popen(['exiftool', path], stdout=PIPE, stdin=PIPE, stderr=PIPE).communicate()[0].decode('utf-8', 'ignore') except UnicodeDecodeError: result = '' for line in str(result).split('\n'): From fb11196be1772a8e3fdecc233f2c22eb1089a499 Mon Sep 17 00:00:00 2001 From: Damian Moore Date: Sun, 8 Aug 2021 18:23:27 +0100 Subject: [PATCH 098/110] Separately allow sample photos but not having to run restricted demo mode --- docker/docker-compose.dev.yml | 2 +- photonix/accounts/schema.py | 23 ++++++++++++++++++----- system/run.sh | 4 ++-- ui/src/components/Login.js | 5 +++-- 4 files changed, 24 insertions(+), 10 deletions(-) diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml index 46102425..2a434e84 100644 --- a/docker/docker-compose.dev.yml +++ b/docker/docker-compose.dev.yml @@ -32,7 +32,7 @@ services: - '8880:8001' environment: ENV: dev - DEMO: 1 + SAMPLE_DATA: 1 POSTGRES_HOST: postgres POSTGRES_DB: photonix POSTGRES_USER: postgres diff --git a/photonix/accounts/schema.py b/photonix/accounts/schema.py index 9d6a822a..2446a879 100644 --- a/photonix/accounts/schema.py +++ b/photonix/accounts/schema.py @@ -47,6 +47,7 @@ def mutate(self, info, username, password, password1): class Environment(graphene.ObjectType): demo = graphene.Boolean() + sample_data = graphene.Boolean() first_run = graphene.Boolean() form = graphene.String() user_id = graphene.ID() @@ -73,32 +74,44 @@ def resolve_profile(self, info): def resolve_environment(self, info): user = User.objects.first() + demo = os.environ.get('DEMO', False) + sample_data = os.environ.get('DEMO', False) or os.environ.get('SAMPLE_DATA', False) + if user and user.has_config_persional_info and \ user.has_created_library and user.has_configured_importing and \ 
user.has_configured_image_analysis: return { - 'demo': os.environ.get('DEMO', False), + 'demo': demo, + 'sample_data': sample_data, 'first_run': False, } else: if not user or not user.is_authenticated: return { - 'demo': os.environ.get('DEMO', False), 'first_run': True, + 'demo': demo, + 'sample_data': sample_data, + 'first_run': True, 'form': 'has_config_persional_info'} if not user.has_created_library: return { - 'demo': os.environ.get('DEMO', False), 'first_run': True, + 'demo': demo, + 'sample_data': sample_data, + 'first_run': True, 'form': 'has_created_library', 'user_id': user.id} if not user.has_configured_importing: return { - 'demo': os.environ.get('DEMO', False), 'first_run': True, + 'demo': demo, + 'sample_data': sample_data, + 'first_run': True, 'form': 'has_configured_importing', 'user_id': user.id, 'library_id': Library.objects.filter(users__user=user)[0].id, 'library_path_id': LibraryPath.objects.filter(library__users__user=user)[0].id } if not user.has_configured_image_analysis: return { - 'demo': os.environ.get('DEMO', False), 'first_run': True, + 'demo': demo, + 'sample_data': sample_data, + 'first_run': True, 'form': 'has_configured_image_analysis', 'user_id': user.id, 'library_id': Library.objects.filter(users__user=user)[0].id, } diff --git a/system/run.sh b/system/run.sh index eb2f2cc3..d8e284a5 100755 --- a/system/run.sh +++ b/system/run.sh @@ -17,8 +17,8 @@ if [ "${ADMIN_PASSWORD}" != "" ]; then python /srv/photonix/manage.py create_admin_from_env fi -if [ "${DEMO}" = "1" ]; then - echo "Ensuring demo user, library and photos are created as we're running with DEMO=1 environment variable" +if [ "${DEMO}" = "1" ] || [ "${SAMPLE_DATA}" = "1" ]; then + echo "Ensuring demo user, library and photos are created as we're running with DEMO=1 or SAMPLE_DATA=1 environment variable" python /srv/photonix/manage.py import_demo_photos fi diff --git a/ui/src/components/Login.js b/ui/src/components/Login.js index df58d157..a3af054f 100644 --- a/ui/src/components/Login.js +++ b/ui/src/components/Login.js @@ -52,6 +52,7 @@ const ENVIRONMENT = gql` { environment { demo + sampleData firstRun form userId @@ -134,7 +135,7 @@ const Login = (props) => { ref={(node) => { inputUsername = node }} - defaultValue={envData && envData.environment.demo ? 'demo' : ''} + defaultValue={envData && (envData.environment.demo || envData.environment.sampleData) ? 'demo' : ''} /> @@ -144,7 +145,7 @@ const Login = (props) => { ref={(node) => { inputPassword = node }} - defaultValue={envData && envData.environment.demo ? 'demo' : ''} + defaultValue={envData && (envData.environment.demo || envData.environment.sampleData) ? 'demo' : ''} />
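
A note on PATCH 092 and PATCH 095: taken together they make EXIF date handling non-fatal — parse_datetime() now returns None for unparseable strings, and record_photo() falls back to the file's creation time. Below is a self-contained sketch of the resulting contract, not code from the patches themselves; it assumes parse_date is dateutil's parser (whose ParserError subclasses ValueError, matching the except clause in the diff) and substitutes datetime.timezone.utc for the utc constant the module already imports.

    # Hedged sketch of parse_datetime() after PATCH 095; not project code.
    from datetime import datetime, timezone

    from dateutil.parser import parse as parse_date  # assumed import

    utc = timezone.utc  # stand-in for the module's utc constant


    def parse_datetime(date_str):
        if not date_str:  # guard added for this sketch only
            return None
        try:
            # EXIF-style 'YYYY:MM:DD HH:MM:SS' is tried first, as in the patch
            return datetime.strptime(date_str, '%Y:%m:%d %H:%M:%S').replace(tzinfo=utc)
        except ValueError:
            try:
                parsed_date = parse_date(date_str)
                if not parsed_date.tzinfo:
                    parsed_date = parsed_date.replace(tzinfo=utc)
                return parsed_date
            except ValueError:
                # Random strings in EXIF date fields yield None instead of crashing
                return None


    assert parse_datetime('2021:07:15 16:45:19') is not None
    assert parse_datetime('2021-07-15T16:45:19+02:00') is not None
    assert parse_datetime('definitely not a date') is None

With None returned here, the file-ctime fallback added to record_photo() in PATCH 092 covers photos whose metadata carries no usable date at all.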