diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 00000000..e7ec037d
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1 @@
+github: [photonixapp]
diff --git a/Makefile b/Makefile
index a19ca349..9443022b 100644
--- a/Makefile
+++ b/Makefile
@@ -19,6 +19,9 @@ restart:
 
 shell:
 	$(DOCKER_COMPOSE_DEV) exec photonix bash
 
+shell-prd:
+	$(DOCKER_COMPOSE_PRD) exec photonix bash
+
 manage:
 	$(DOCKER_COMPOSE_DEV) exec photonix python photonix/manage.py ${}
 
diff --git a/docker/Dockerfile.dev b/docker/Dockerfile.dev
index bce3561d..c9d4d89e 100644
--- a/docker/Dockerfile.dev
+++ b/docker/Dockerfile.dev
@@ -2,10 +2,12 @@ FROM python:3.8.9-slim-buster
 
 # Install system dependencies - note that some of these are only used on non-amd64 where Python packages have to be compiled from source
 RUN apt-get update && \
-    apt-get install -y \
+    apt-get install -y --no-install-recommends \
        build-essential \
+       cron \
        curl \
        dcraw \
+       file \
        git \
        gnupg \
        libatlas-base-dev \
@@ -14,7 +16,10 @@ RUN apt-get update && \
        libblas3 \
        libfreetype6 \
        libfreetype6-dev \
+       libgl1 \
+       libglib2.0-dev \
        libhdf5-dev \
+       libheif-examples \
        libimage-exiftool-perl \
        libjpeg-dev \
        liblapack-dev \
@@ -66,6 +71,7 @@ RUN cd /srv/ui && yarn install
 # Copy over the code
 COPY photonix /srv/photonix
 COPY test.py /srv/test.py
+COPY manage.py /srv/manage.py
 COPY tests /srv/tests
 COPY ui/public /srv/ui/public
 COPY ui/src /srv/ui/src
@@ -74,6 +80,10 @@ COPY ui/src /srv/ui/src
 COPY system /srv/system
 COPY system/supervisord.conf /etc/supervisord.conf
 
+# Copy crontab
+COPY system/cron.d /etc/cron.d/
+RUN chmod 0644 /etc/cron.d/*
+
 ENV PYTHONPATH /srv
 
 CMD ./system/run.sh
diff --git a/docker/Dockerfile.prd b/docker/Dockerfile.prd
index 26fccc48..053f37af 100644
--- a/docker/Dockerfile.prd
+++ b/docker/Dockerfile.prd
@@ -5,6 +5,7 @@ FROM ${ARCH}python:3.8.9-slim-buster as builder
 RUN apt-get update && \
     apt-get install -y \
        build-essential \
+       cmake \
        curl \
        gfortran \
        gnupg \
@@ -13,11 +14,11 @@ RUN apt-get update && \
        libblas3 \
        libfreetype6 \
        libfreetype6-dev \
-       libhdf5-dev \
        libjpeg-dev \
        liblapack-dev \
        liblapack3 \
        libpq-dev \
+       libssl-dev \
        libtiff5-dev \
     && \
     apt-get clean && \
@@ -86,7 +87,6 @@ RUN rm -rf \
     /usr/local/lib/python3.8/site-packages/matplotlib/mpl-data/sample_data \
     /usr/local/lib/python3.8/site-packages/matplotlib/mpl-data/images \
     /usr/local/lib/python3.8/site-packages/matplotlib/mpl-data/stylelib \
-    /usr/local/lib/python3.8/site-packages/h5py \
     /usr/local/lib/python3.8/site-packages/tensorboard \
     /usr/local/lib/python3.8/site-packages/tensorboard_plugin_wit
 
@@ -94,17 +94,24 @@
 FROM ${ARCH}python:3.8.9-slim-buster
 
 RUN apt-get update && \
-    apt-get install -y \
+    apt-get install -y --no-install-recommends \
+       cron \
        dcraw \
+       file \
        libatlas3-base \
        libfreetype6 \
        libfreetype6-dev \
+       libgl1 \
+       libglib2.0-dev \
+       libhdf5-dev \
+       libheif-examples \
        libimage-exiftool-perl \
        libpq-dev \
        libtiff5-dev \
        netcat \
        nginx-light \
        supervisor \
+       xz-utils \
     && \
     apt-get clean && \
     rm -rf /var/lib/apt/lists/* \
@@ -120,6 +127,7 @@ WORKDIR /srv
 
 # Copy over the code
 COPY photonix /srv/photonix
+COPY manage.py /srv/manage.py
 COPY test.py /srv/test.py
 COPY tests /srv/tests
 COPY ui/public /srv/ui/public
@@ -128,9 +136,14 @@ COPY ui/public /srv/ui/public
 
 COPY system /srv/system
 COPY system/supervisord.conf /etc/supervisord.conf
 
+# Copy crontab
+COPY system/cron.d /etc/cron.d/
+RUN chmod 0644 /etc/cron.d/*
+
 ENV PYTHONPATH /srv
+ENV TF_CPP_MIN_LOG_LEVEL 3
 
-RUN python photonix/manage.py collectstatic --noinput --link
+RUN DJANGO_SECRET_KEY=test python photonix/manage.py collectstatic --noinput --link
 
 CMD ./system/run.sh
diff --git a/docker/docker-compose.dev.yml b/docker/docker-compose.dev.yml
index 17dc1a11..2a434e84 100644
--- a/docker/docker-compose.dev.yml
+++ b/docker/docker-compose.dev.yml
@@ -32,13 +32,14 @@ services:
       - '8880:8001'
     environment:
       ENV: dev
-      DEMO: 1
+      SAMPLE_DATA: 1
       POSTGRES_HOST: postgres
       POSTGRES_DB: photonix
       POSTGRES_USER: postgres
       POSTGRES_PASSWORD: password
       REDIS_HOST: redis
       ALLOWED_HOSTS: '*'
+      LOG_LEVEL: DEBUG
     volumes:
       - ../photonix:/srv/photonix
       - ../system:/srv/system
diff --git a/docker/docker-compose.example.yml b/docker/docker-compose.example.yml
index 62524783..f2728366 100644
--- a/docker/docker-compose.example.yml
+++ b/docker/docker-compose.example.yml
@@ -27,6 +27,7 @@ services:
       POSTGRES_PASSWORD: password
       REDIS_HOST: redis
       ALLOWED_HOSTS: '*'
+      # More configuration options here: https://photonix.org/docs/configuration/
     volumes:
       - ./data/photos:/data/photos
       - ./data/raw-photos-processed:/data/raw-photos-processed
diff --git a/manage.py b/manage.py
new file mode 120000
index 00000000..ff066519
--- /dev/null
+++ b/manage.py
@@ -0,0 +1 @@
+photonix/manage.py
\ No newline at end of file
diff --git a/photonix/accounts/migrations/0004_alter_user_first_name.py b/photonix/accounts/migrations/0004_alter_user_first_name.py
new file mode 100644
index 00000000..7efbf1a9
--- /dev/null
+++ b/photonix/accounts/migrations/0004_alter_user_first_name.py
@@ -0,0 +1,18 @@
+# Generated by Django 3.2.3 on 2021-06-17 22:18
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('accounts', '0003_add_user_signup_flds'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='user',
+            name='first_name',
+            field=models.CharField(blank=True, max_length=150, verbose_name='first name'),
+        ),
+    ]
diff --git a/photonix/accounts/schema.py b/photonix/accounts/schema.py
index a57a19e0..2446a879 100644
--- a/photonix/accounts/schema.py
+++ b/photonix/accounts/schema.py
@@ -3,6 +3,7 @@
 from django.contrib.auth import get_user_model, authenticate, update_session_auth_hash
 import graphene
 from graphene_django.types import DjangoObjectType
+from graphql import GraphQLError
 from graphql_jwt.shortcuts import create_refresh_token, get_token
 import graphql_jwt
 from photonix.photos.models import Library, LibraryPath, LibraryUser
@@ -12,18 +13,12 @@
 
 
 class UserType(DjangoObjectType):
-    """Docstring for UserType."""
-
     class Meta:
         model = User
 
 
 class CreateUser(graphene.Mutation):
-    """Docstring for CreateUser."""
-
     class Arguments:
-        """Docstring for Arguments."""
-
         username = graphene.String(required=True)
         password = graphene.String(required=True)
         password1 = graphene.String(required=True)
@@ -34,13 +29,12 @@ class Arguments:
 
     @staticmethod
     def mutate(self, info, username, password, password1):
-        """Mutate method."""
         if User.objects.filter(username=username).exists():
-            raise Exception("Username already exists!")
+            raise GraphQLError('Username already exists!')
         elif len(password) < 8 and len(password1) < 8:
-            raise Exception("Password must be at least 8 characters long!")
+            raise GraphQLError('Password must be at least 8 characters long!')
         elif password != password1:
-            raise Exception("Password fields do not match!")
+            raise GraphQLError('Password fields do not match!')
         else:
             user = User(username=username)
             user.set_password(password1)
@@ -53,6 +47,7 @@ def mutate(self, info, username, password, password1):
 
 class Environment(graphene.ObjectType):
     demo = graphene.Boolean()
+    sample_data = graphene.Boolean()
     first_run = graphene.Boolean()
     form = graphene.String()
     user_id = graphene.ID()
@@ -61,11 +56,11 @@ class Environment(graphene.ObjectType):
 
 
 class AfterSignup(graphene.ObjectType):
-    """Pass token for login, after signup."""
-
+    '''Pass token for login, after signup.'''
     token = graphene.String()
     refresh_token = graphene.String()
+
 
 class Query(graphene.ObjectType):
     profile = graphene.Field(UserType)
     environment = graphene.Field(Environment)
@@ -74,56 +69,63 @@ class Query(graphene.ObjectType):
     def resolve_profile(self, info):
         user = info.context.user
         if user.is_anonymous:
-            raise Exception('Not logged in')
+            raise GraphQLError('Not logged in')
         return user
 
     def resolve_environment(self, info):
         user = User.objects.first()
+        demo = os.environ.get('DEMO', False)
+        sample_data = os.environ.get('DEMO', False) or os.environ.get('SAMPLE_DATA', False)
+
         if user and user.has_config_persional_info and \
            user.has_created_library and user.has_configured_importing and \
            user.has_configured_image_analysis:
-            # raise Exception(info.context.user.is_anonymous)
             return {
-                'demo': os.environ.get('DEMO', False),
+                'demo': demo,
+                'sample_data': sample_data,
                 'first_run': False,
             }
         else:
-            if not user:
+            if not user or not user.is_authenticated:
                 return {
-                    'demo': os.environ.get('DEMO', False), 'first_run': True,
+                    'demo': demo,
+                    'sample_data': sample_data,
+                    'first_run': True,
                     'form': 'has_config_persional_info'}
             if not user.has_created_library:
                 return {
-                    'demo': os.environ.get('DEMO', False), 'first_run': True,
+                    'demo': demo,
+                    'sample_data': sample_data,
+                    'first_run': True,
                     'form': 'has_created_library', 'user_id': user.id}
             if not user.has_configured_importing:
                 return {
-                    'demo': os.environ.get('DEMO', False), 'first_run': True,
+                    'demo': demo,
+                    'sample_data': sample_data,
+                    'first_run': True,
                     'form': 'has_configured_importing', 'user_id': user.id,
                     'library_id': Library.objects.filter(users__user=user)[0].id,
                     'library_path_id': LibraryPath.objects.filter(library__users__user=user)[0].id
                 }
             if not user.has_configured_image_analysis:
                 return {
-                    'demo': os.environ.get('DEMO', False), 'first_run': True,
+                    'demo': demo,
+                    'sample_data': sample_data,
+                    'first_run': True,
                     'form': 'has_configured_image_analysis', 'user_id': user.id,
                     'library_id': Library.objects.filter(users__user=user)[0].id,
                 }
 
     def resolve_after_signup(self, info):
-        """To login user from frontend after finish sigunp process."""
+        '''To login user from frontend after finish sigunp process.'''
         user = info.context.user
-        if user.has_configured_image_analysis:
+        if user.is_authenticated and user.has_configured_image_analysis:
             return {'token': get_token(user), 'refresh_token': create_refresh_token(user)}
         return {'token': None, 'refresh_token': None}
 
 
 class ChangePassword(graphene.Mutation):
-    """docstring for ChangePassword."""
-
     class Arguments:
-        """docstring for Arguments."""
-
         old_password = graphene.String(required=True)
         new_password = graphene.String(required=True)
@@ -131,9 +133,8 @@ class Arguments:
 
     @staticmethod
     def mutate(self, info, old_password, new_password):
-        """Mutate method for change password."""
         if os.environ.get('DEMO', False) and os.environ.get('ENV') != 'test':
-            raise Exception("Password cannot be changed in demo mode!")
+            raise GraphQLError('Password cannot be changed in demo mode!')
         if authenticate(username=info.context.user.username, password=old_password):
             info.context.user.set_password(new_password)
             info.context.user.save()
@@ -143,8 +144,6 @@ def mutate(self, info, old_password, new_password):
 
 
 class Mutation(graphene.ObjectType):
-    """To create objects for all mutaions."""
-
     token_auth = graphql_jwt.ObtainJSONWebToken.Field()
     verify_token = graphql_jwt.Verify.Field()
     refresh_token = graphql_jwt.Refresh.Field()
diff --git a/photonix/classifiers/base_model.py b/photonix/classifiers/base_model.py
index 91e243d5..9487fae1 100644
--- a/photonix/classifiers/base_model.py
+++ b/photonix/classifiers/base_model.py
@@ -9,14 +9,16 @@
 import logging
 
 import requests
-
-import redis
 from redis_lock import Lock
 
+from photonix.photos.utils.redis import redis_connection
+
+
 graph_cache = {}
 
 logger = logging.getLogger(__name__)
 
+
 class BaseModel:
     def __init__(self, model_dir=None):
         global graph_cache
@@ -50,8 +52,7 @@ def ensure_downloaded(self, lock_name=None):
         if not lock_name:
             lock_name = 'classifier_{}_download'.format(self.name)
 
-        r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1'))
-        with Lock(r, lock_name):
+        with Lock(redis_connection, lock_name):
             try:
                 with open(version_file) as f:
                     if f.read().strip() == str(self.version):
diff --git a/photonix/classifiers/color/model.py b/photonix/classifiers/color/model.py
index 7256e9a1..7db3788e 100644
--- a/photonix/classifiers/color/model.py
+++ b/photonix/classifiers/color/model.py
@@ -17,7 +17,7 @@ def __init__(self):
         self.colors = {
             # Name: ((red, green, blue), ordering)
-            'Red': ((120, 4, 20), 1), 
+            'Red': ((120, 4, 20), 1),
             'Orange': ((245, 133, 0), 2),
             'Amber': ((234, 166, 30), 3),
             'Yellow': ((240, 240, 39), 4),
@@ -82,15 +82,11 @@ def run_on_photo(photo_id):
 
     photo, results = results_for_model_on_photo(model, photo_id)
 
     if photo:
-        from django.utils import timezone
         from photonix.photos.models import PhotoTag
         photo.clear_tags(source='C', type='C')
         for name, score in results:
             tag = get_or_create_tag(library=photo.library, name=name, type='C', source='C', ordering=model.colors[name][1])
             PhotoTag(photo=photo, tag=tag, source='C', confidence=score, significance=score).save()
-        photo.classifier_color_completed_at = timezone.now()
-        photo.classifier_color_version = getattr(model, 'version', 0)
-        photo.save()
 
     return photo, results
diff --git a/photonix/classifiers/event/__init__.py b/photonix/classifiers/event/__init__.py
new file mode 100644
index 00000000..9898e544
--- /dev/null
+++ b/photonix/classifiers/event/__init__.py
@@ -0,0 +1 @@
+from .model import EventModel, run_on_photo
diff --git a/photonix/classifiers/event/info.py b/photonix/classifiers/event/info.py
new file mode 100644
index 00000000..87213f17
--- /dev/null
+++ b/photonix/classifiers/event/info.py
@@ -0,0 +1,3 @@
+
+name = 'event'
+version = 20210505
diff --git a/photonix/classifiers/event/model.py b/photonix/classifiers/event/model.py
new file mode 100644
index 00000000..4f0bef21
--- /dev/null
+++ b/photonix/classifiers/event/model.py
@@ -0,0 +1,58 @@
+import sys
+from pathlib import Path
+from photonix.photos.utils.metadata import (PhotoMetadata, parse_datetime)
+import datetime
+
+
+class EventModel:
+    version = 20210505
+    approx_ram_mb = 120
+    max_num_workers = 2
+
+    def predict(self, image_file):
+        metadata = PhotoMetadata(image_file)
+        date_taken = None
+        possible_date_keys = ['Date/Time Original', 'Date Time Original', 'Date/Time', 'Date Time', 'GPS Date/Time', 'Modify Date', 'File Modification Date/Time']
+        for date_key in possible_date_keys:
+            date_taken = parse_datetime(metadata.get(date_key))
+            if date_taken:
+                events = {
+                    datetime.date(date_taken.year, 12, 25): "Christmas Day",
datetime.date(date_taken.year, 10, 31): "Halloween", + datetime.date(date_taken.year, 2, 14): "Valentine's Day", + datetime.date(date_taken.year, 12, 31): "New Year Start", + datetime.date(date_taken.year, 1, 1): "New Year End", + } + if events.get(date_taken.date()): + if events.get(date_taken.date()).startswith("New Year"): + start_of_day = datetime.datetime.combine(datetime.date(date_taken.year, 12, 31), datetime.datetime.min.time()) + end_of_day = start_of_day + datetime.timedelta(days=1) + if start_of_day <= date_taken.replace(tzinfo=None) <= end_of_day: + return ['New Year'] + return [events.get(date_taken.date())] + return [] + +def run_on_photo(photo_id): + model = EventModel() + sys.path.insert(0, str(Path(__file__).resolve().parent.parent)) + from photonix.classifiers.runners import results_for_model_on_photo, get_or_create_tag + + photo, results = results_for_model_on_photo(model, photo_id) + if photo: + from photonix.photos.models import PhotoTag + photo.clear_tags(source='C', type='E') + for name in results: + tag = get_or_create_tag(library=photo.library, name=name, type='E', source='C') + PhotoTag(photo=photo, tag=tag, source='C', confidence=0.5, significance=0.5).save() + + return photo, results + + +if __name__ == '__main__': + if len(sys.argv) != 2: + print('Argument required: image file path') + exit(1) + + _, results = run_on_photo(sys.argv[1]) + + print(results) diff --git a/photonix/classifiers/face/__init__.py b/photonix/classifiers/face/__init__.py new file mode 100644 index 00000000..25b2240f --- /dev/null +++ b/photonix/classifiers/face/__init__.py @@ -0,0 +1 @@ +from .model import FaceModel, run_on_photo diff --git a/photonix/classifiers/face/deepface/DeepFace.py b/photonix/classifiers/face/deepface/DeepFace.py new file mode 100644 index 00000000..0129b514 --- /dev/null +++ b/photonix/classifiers/face/deepface/DeepFace.py @@ -0,0 +1,103 @@ +import warnings +warnings.filterwarnings("ignore") + +import os +#os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' + +from photonix.classifiers.face.deepface.basemodels import Facenet +from photonix.classifiers.face.deepface.commons import functions, distance as dst + +import tensorflow as tf +tf_version = int(tf.__version__.split(".")[0]) +if tf_version == 2: + import logging + tf.get_logger().setLevel(logging.ERROR) + +def build_model(model_name): + + """ + This function builds a deepface model + Parameters: + model_name (string): face recognition or facial attribute model + VGG-Face, Facenet, OpenFace, DeepFace, DeepID for face recognition + Age, Gender, Emotion, Race for facial attributes + + Returns: + built deepface model + """ + + models = { + 'Facenet': Facenet.loadModel, + } + + model = models.get(model_name) + + if model: + model = model() + #print('Using {} model backend'.format(model_name)) + return model + else: + raise ValueError('Invalid model_name passed - {}'.format(model_name)) + + +def represent(img_path, model_name = 'VGG-Face', model = None, enforce_detection = True, detector_backend = 'mtcnn'): + + """ + This function represents facial images as vectors. + + Parameters: + img_path: exact image path, numpy array or based64 encoded images could be passed. + + model_name (string): VGG-Face, Facenet, OpenFace, DeepFace, DeepID, Dlib, ArcFace. + + model: Built deepface model. A face recognition model is built every call of verify function. You can pass pre-built face recognition model optionally if you will call verify function several times. 
Consider to pass model if you are going to call represent function in a for loop. + + model = DeepFace.build_model('VGG-Face') + + enforce_detection (boolean): If any face could not be detected in an image, then verify function will return exception. Set this to False not to have this exception. This might be convenient for low resolution images. + + detector_backend (string): set face detector backend as mtcnn, opencv, ssd or dlib + + Returns: + Represent function returns a multidimensional vector. The number of dimensions is changing based on the reference model. E.g. FaceNet returns 128 dimensional vector; VGG-Face returns 2622 dimensional vector. + """ + + if model is None: + model = build_model(model_name) + + #--------------------------------- + + #decide input shape + input_shape = input_shape_x, input_shape_y= functions.find_input_shape(model) + + #detect and align + img = functions.preprocess_face(img = img_path + , target_size=(input_shape_y, input_shape_x) + , enforce_detection = enforce_detection + , detector_backend = detector_backend) + + #represent + embedding = model.predict(img)[0].tolist() + + return embedding + + +def detectFace(img_path, detector_backend = 'mtcnn'): + + """ + This function applies pre-processing stages of a face recognition pipeline including detection and alignment + + Parameters: + img_path: exact image path, numpy array or base64 encoded image + + detector_backend (string): face detection backends are mtcnn, opencv, ssd or dlib + + Returns: + deteced and aligned face in numpy format + """ + + functions.initialize_detector(detector_backend = detector_backend) + + img = functions.preprocess_face(img = img_path, detector_backend = detector_backend)[0] #preprocess_face returns (1, 224, 224, 3) + return img[:, :, ::-1] #bgr to rgb diff --git a/photonix/classifiers/face/deepface/__init__.py b/photonix/classifiers/face/deepface/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/photonix/classifiers/face/deepface/basemodels/Facenet.py b/photonix/classifiers/face/deepface/basemodels/Facenet.py new file mode 100644 index 00000000..e81ec222 --- /dev/null +++ b/photonix/classifiers/face/deepface/basemodels/Facenet.py @@ -0,0 +1,542 @@ +import os + +from tensorflow.keras.models import Model +from tensorflow.keras.layers import Activation +from tensorflow.keras.layers import BatchNormalization +from tensorflow.keras.layers import Concatenate +from tensorflow.keras.layers import Conv2D +from tensorflow.keras.layers import Dense +from tensorflow.keras.layers import Dropout +from tensorflow.keras.layers import GlobalAveragePooling2D +from tensorflow.keras.layers import Input +from tensorflow.keras.layers import Lambda +from tensorflow.keras.layers import MaxPooling2D +from tensorflow.keras.layers import add +from tensorflow.keras import backend as K + + +def scaling(x, scale): + return x * scale + + +def InceptionResNetV2(): + + inputs = Input(shape=(160, 160, 3)) + x = Conv2D(32, 3, strides=2, padding='valid', use_bias=False, name= 'Conv2d_1a_3x3') (inputs) + x = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_1a_3x3_BatchNorm')(x) + x = Activation('relu', name='Conv2d_1a_3x3_Activation')(x) + x = Conv2D(32, 3, strides=1, padding='valid', use_bias=False, name= 'Conv2d_2a_3x3') (x) + x = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_2a_3x3_BatchNorm')(x) + x = Activation('relu', name='Conv2d_2a_3x3_Activation')(x) + x = Conv2D(64, 3, strides=1, padding='same', 
use_bias=False, name= 'Conv2d_2b_3x3') (x) + x = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_2b_3x3_BatchNorm')(x) + x = Activation('relu', name='Conv2d_2b_3x3_Activation')(x) + x = MaxPooling2D(3, strides=2, name='MaxPool_3a_3x3')(x) + x = Conv2D(80, 1, strides=1, padding='valid', use_bias=False, name= 'Conv2d_3b_1x1') (x) + x = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_3b_1x1_BatchNorm')(x) + x = Activation('relu', name='Conv2d_3b_1x1_Activation')(x) + x = Conv2D(192, 3, strides=1, padding='valid', use_bias=False, name= 'Conv2d_4a_3x3') (x) + x = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_4a_3x3_BatchNorm')(x) + x = Activation('relu', name='Conv2d_4a_3x3_Activation')(x) + x = Conv2D(256, 3, strides=2, padding='valid', use_bias=False, name= 'Conv2d_4b_3x3') (x) + x = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Conv2d_4b_3x3_BatchNorm')(x) + x = Activation('relu', name='Conv2d_4b_3x3_Activation')(x) + + # 5x Block35 (Inception-ResNet-A block): + branch_0 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block35_1_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_1_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_1_Conv2d_0b_3x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_1_Branch_1_Conv2d_0b_3x3_Activation')(branch_1) + branch_2 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_2_Conv2d_0a_1x1') (x) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_1_Branch_2_Conv2d_0a_1x1_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_2_Conv2d_0b_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_1_Branch_2_Conv2d_0b_3x3_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_1_Branch_2_Conv2d_0c_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_1_Branch_2_Conv2d_0c_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_1_Branch_2_Conv2d_0c_3x3_Activation')(branch_2) + branches = [branch_0, branch_1, branch_2] + mixed = Concatenate(axis=3, name='Block35_1_Concatenate')(branches) + up = Conv2D(256, 1, strides=1, padding='same', use_bias=True, name= 'Block35_1_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 
0.17})(up) + x = add([x, up]) + x = Activation('relu', name='Block35_1_Activation')(x) + + branch_0 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_2_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block35_2_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_2_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_2_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_1_Conv2d_0b_3x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_2_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_2_Branch_1_Conv2d_0b_3x3_Activation')(branch_1) + branch_2 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_2_Conv2d_0a_1x1') (x) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_2_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_2_Branch_2_Conv2d_0a_1x1_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_2_Conv2d_0b_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_2_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_2_Branch_2_Conv2d_0b_3x3_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_2_Branch_2_Conv2d_0c_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_2_Branch_2_Conv2d_0c_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_2_Branch_2_Conv2d_0c_3x3_Activation')(branch_2) + branches = [branch_0, branch_1, branch_2] + mixed = Concatenate(axis=3, name='Block35_2_Concatenate')(branches) + up = Conv2D(256, 1, strides=1, padding='same', use_bias=True, name= 'Block35_2_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.17})(up) + x = add([x, up]) + x = Activation('relu', name='Block35_2_Activation')(x) + + branch_0 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_3_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block35_3_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_3_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_3_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_1_Conv2d_0b_3x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, 
name='Block35_3_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_3_Branch_1_Conv2d_0b_3x3_Activation')(branch_1) + branch_2 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_2_Conv2d_0a_1x1') (x) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_3_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_3_Branch_2_Conv2d_0a_1x1_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_2_Conv2d_0b_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_3_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_3_Branch_2_Conv2d_0b_3x3_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_3_Branch_2_Conv2d_0c_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_3_Branch_2_Conv2d_0c_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_3_Branch_2_Conv2d_0c_3x3_Activation')(branch_2) + branches = [branch_0, branch_1, branch_2] + mixed = Concatenate(axis=3, name='Block35_3_Concatenate')(branches) + up = Conv2D(256, 1, strides=1, padding='same', use_bias=True, name= 'Block35_3_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.17})(up) + x = add([x, up]) + x = Activation('relu', name='Block35_3_Activation')(x) + + branch_0 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block35_4_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_4_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_1_Conv2d_0b_3x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_4_Branch_1_Conv2d_0b_3x3_Activation')(branch_1) + branch_2 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_2_Conv2d_0a_1x1') (x) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_4_Branch_2_Conv2d_0a_1x1_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_2_Conv2d_0b_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_4_Branch_2_Conv2d_0b_3x3_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_4_Branch_2_Conv2d_0c_3x3') (branch_2) + branch_2 = 
BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_4_Branch_2_Conv2d_0c_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_4_Branch_2_Conv2d_0c_3x3_Activation')(branch_2) + branches = [branch_0, branch_1, branch_2] + mixed = Concatenate(axis=3, name='Block35_4_Concatenate')(branches) + up = Conv2D(256, 1, strides=1, padding='same', use_bias=True, name= 'Block35_4_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.17})(up) + x = add([x, up]) + x = Activation('relu', name='Block35_4_Activation')(x) + + branch_0 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_5_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block35_5_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_5_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_5_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_5_Branch_1_Conv2d_0b_3x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block35_5_Branch_1_Conv2d_0b_3x3_Activation')(branch_1) + branch_2 = Conv2D(32, 1, strides=1, padding='same', use_bias=False, name= 'Block35_5_Branch_2_Conv2d_0a_1x1') (x) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_5_Branch_2_Conv2d_0a_1x1_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_5_Branch_2_Conv2d_0b_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_5_Branch_2_Conv2d_0b_3x3_Activation')(branch_2) + branch_2 = Conv2D(32, 3, strides=1, padding='same', use_bias=False, name= 'Block35_5_Branch_2_Conv2d_0c_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block35_5_Branch_2_Conv2d_0c_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Block35_5_Branch_2_Conv2d_0c_3x3_Activation')(branch_2) + branches = [branch_0, branch_1, branch_2] + mixed = Concatenate(axis=3, name='Block35_5_Concatenate')(branches) + up = Conv2D(256, 1, strides=1, padding='same', use_bias=True, name= 'Block35_5_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.17})(up) + x = add([x, up]) + x = Activation('relu', name='Block35_5_Activation')(x) + + # Mixed 6a (Reduction-A block): + branch_0 = Conv2D(384, 3, strides=2, padding='valid', use_bias=False, name= 'Mixed_6a_Branch_0_Conv2d_1a_3x3') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_6a_Branch_0_Conv2d_1a_3x3_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Mixed_6a_Branch_0_Conv2d_1a_3x3_Activation')(branch_0) + branch_1 = Conv2D(192, 1, 
strides=1, padding='same', use_bias=False, name= 'Mixed_6a_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_6a_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Mixed_6a_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(192, 3, strides=1, padding='same', use_bias=False, name= 'Mixed_6a_Branch_1_Conv2d_0b_3x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_6a_Branch_1_Conv2d_0b_3x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Mixed_6a_Branch_1_Conv2d_0b_3x3_Activation')(branch_1) + branch_1 = Conv2D(256, 3, strides=2, padding='valid', use_bias=False, name= 'Mixed_6a_Branch_1_Conv2d_1a_3x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_6a_Branch_1_Conv2d_1a_3x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Mixed_6a_Branch_1_Conv2d_1a_3x3_Activation')(branch_1) + branch_pool = MaxPooling2D(3, strides=2, padding='valid', name='Mixed_6a_Branch_2_MaxPool_1a_3x3')(x) + branches = [branch_0, branch_1, branch_pool] + x = Concatenate(axis=3, name='Mixed_6a')(branches) + + # 10x Block17 (Inception-ResNet-B block): + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_1_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_1_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_1_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_1_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_1_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_1_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_1_Branch_1_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_1_Branch_1_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_1_Branch_1_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_1_Branch_1_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_1_Branch_1_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_1_Branch_1_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_1_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_1_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_1_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_2_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_2_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_2_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 
'Block17_2_Branch_2_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_2_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_2_Branch_2_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_2_Branch_2_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_2_Branch_2_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_2_Branch_2_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_2_Branch_2_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_2_Branch_2_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_2_Branch_2_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_2_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_2_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_2_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_3_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_3_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_3_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_3_Branch_3_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_3_Branch_3_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_3_Branch_3_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_3_Branch_3_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_3_Branch_3_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_3_Branch_3_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_3_Branch_3_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_3_Branch_3_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_3_Branch_3_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_3_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_3_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_3_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_4_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_4_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_4_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 
= Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_4_Branch_4_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_4_Branch_4_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_4_Branch_4_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_4_Branch_4_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_4_Branch_4_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_4_Branch_4_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_4_Branch_4_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_4_Branch_4_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_4_Branch_4_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_4_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_4_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_4_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_5_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_5_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_5_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_5_Branch_5_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_5_Branch_5_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_5_Branch_5_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_5_Branch_5_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_5_Branch_5_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_5_Branch_5_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_5_Branch_5_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_5_Branch_5_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_5_Branch_5_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_5_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_5_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_5_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_6_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_6_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', 
name='Block17_6_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_6_Branch_6_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_6_Branch_6_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_6_Branch_6_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_6_Branch_6_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_6_Branch_6_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_6_Branch_6_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_6_Branch_6_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_6_Branch_6_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_6_Branch_6_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_6_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_6_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_6_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_7_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_7_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_7_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_7_Branch_7_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_7_Branch_7_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_7_Branch_7_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_7_Branch_7_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_7_Branch_7_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_7_Branch_7_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_7_Branch_7_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_7_Branch_7_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_7_Branch_7_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_7_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_7_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_7_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_8_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, 
name='Block17_8_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_8_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_8_Branch_8_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_8_Branch_8_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_8_Branch_8_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_8_Branch_8_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_8_Branch_8_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_8_Branch_8_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_8_Branch_8_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_8_Branch_8_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_8_Branch_8_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_8_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_8_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_8_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_9_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_9_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_9_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_9_Branch_9_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_9_Branch_9_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_9_Branch_9_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_9_Branch_9_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_9_Branch_9_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_9_Branch_9_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_9_Branch_9_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_9_Branch_9_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_9_Branch_9_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_9_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_9_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_9_Activation')(x) + + branch_0 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_10_Branch_0_Conv2d_1x1') (x) + branch_0 = 
BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_10_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block17_10_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(128, 1, strides=1, padding='same', use_bias=False, name= 'Block17_10_Branch_10_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_10_Branch_10_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_10_Branch_10_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(128, [1, 7], strides=1, padding='same', use_bias=False, name= 'Block17_10_Branch_10_Conv2d_0b_1x7') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_10_Branch_10_Conv2d_0b_1x7_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_10_Branch_10_Conv2d_0b_1x7_Activation')(branch_1) + branch_1 = Conv2D(128, [7, 1], strides=1, padding='same', use_bias=False, name= 'Block17_10_Branch_10_Conv2d_0c_7x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block17_10_Branch_10_Conv2d_0c_7x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block17_10_Branch_10_Conv2d_0c_7x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block17_10_Concatenate')(branches) + up = Conv2D(896, 1, strides=1, padding='same', use_bias=True, name= 'Block17_10_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.1})(up) + x = add([x, up]) + x = Activation('relu', name='Block17_10_Activation')(x) + + # Mixed 7a (Reduction-B block): 8 x 8 x 2080 + branch_0 = Conv2D(256, 1, strides=1, padding='same', use_bias=False, name= 'Mixed_7a_Branch_0_Conv2d_0a_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_0_Conv2d_0a_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Mixed_7a_Branch_0_Conv2d_0a_1x1_Activation')(branch_0) + branch_0 = Conv2D(384, 3, strides=2, padding='valid', use_bias=False, name= 'Mixed_7a_Branch_0_Conv2d_1a_3x3') (branch_0) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_0_Conv2d_1a_3x3_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Mixed_7a_Branch_0_Conv2d_1a_3x3_Activation')(branch_0) + branch_1 = Conv2D(256, 1, strides=1, padding='same', use_bias=False, name= 'Mixed_7a_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Mixed_7a_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(256, 3, strides=2, padding='valid', use_bias=False, name= 'Mixed_7a_Branch_1_Conv2d_1a_3x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_1_Conv2d_1a_3x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Mixed_7a_Branch_1_Conv2d_1a_3x3_Activation')(branch_1) + branch_2 = Conv2D(256, 1, strides=1, padding='same', use_bias=False, name= 'Mixed_7a_Branch_2_Conv2d_0a_1x1') (x) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Mixed_7a_Branch_2_Conv2d_0a_1x1_Activation')(branch_2) + branch_2 = 
Conv2D(256, 3, strides=1, padding='same', use_bias=False, name= 'Mixed_7a_Branch_2_Conv2d_0b_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_2_Conv2d_0b_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Mixed_7a_Branch_2_Conv2d_0b_3x3_Activation')(branch_2) + branch_2 = Conv2D(256, 3, strides=2, padding='valid', use_bias=False, name= 'Mixed_7a_Branch_2_Conv2d_1a_3x3') (branch_2) + branch_2 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Mixed_7a_Branch_2_Conv2d_1a_3x3_BatchNorm')(branch_2) + branch_2 = Activation('relu', name='Mixed_7a_Branch_2_Conv2d_1a_3x3_Activation')(branch_2) + branch_pool = MaxPooling2D(3, strides=2, padding='valid', name='Mixed_7a_Branch_3_MaxPool_1a_3x3')(x) + branches = [branch_0, branch_1, branch_2, branch_pool] + x = Concatenate(axis=3, name='Mixed_7a')(branches) + + # 5x Block8 (Inception-ResNet-C block): + + branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_1_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_1_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block8_1_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_1_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_1_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_1_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(192, [1, 3], strides=1, padding='same', use_bias=False, name= 'Block8_1_Branch_1_Conv2d_0b_1x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_1_Branch_1_Conv2d_0b_1x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_1_Branch_1_Conv2d_0b_1x3_Activation')(branch_1) + branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_1_Branch_1_Conv2d_0c_3x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_1_Branch_1_Conv2d_0c_3x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_1_Branch_1_Conv2d_0c_3x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block8_1_Concatenate')(branches) + up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_1_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.2})(up) + x = add([x, up]) + x = Activation('relu', name='Block8_1_Activation')(x) + + branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_2_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_2_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block8_2_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_2_Branch_2_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_2_Branch_2_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_2_Branch_2_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(192, [1, 3], strides=1, padding='same', use_bias=False, name= 
'Block8_2_Branch_2_Conv2d_0b_1x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_2_Branch_2_Conv2d_0b_1x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_2_Branch_2_Conv2d_0b_1x3_Activation')(branch_1) + branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_2_Branch_2_Conv2d_0c_3x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_2_Branch_2_Conv2d_0c_3x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_2_Branch_2_Conv2d_0c_3x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block8_2_Concatenate')(branches) + up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_2_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.2})(up) + x = add([x, up]) + x = Activation('relu', name='Block8_2_Activation')(x) + + branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_3_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_3_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block8_3_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_3_Branch_3_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_3_Branch_3_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_3_Branch_3_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(192, [1, 3], strides=1, padding='same', use_bias=False, name= 'Block8_3_Branch_3_Conv2d_0b_1x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_3_Branch_3_Conv2d_0b_1x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_3_Branch_3_Conv2d_0b_1x3_Activation')(branch_1) + branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_3_Branch_3_Conv2d_0c_3x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_3_Branch_3_Conv2d_0c_3x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_3_Branch_3_Conv2d_0c_3x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block8_3_Concatenate')(branches) + up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_3_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.2})(up) + x = add([x, up]) + x = Activation('relu', name='Block8_3_Activation')(x) + + branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_4_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_4_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block8_4_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_4_Branch_4_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_4_Branch_4_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_4_Branch_4_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(192, [1, 3], strides=1, 
padding='same', use_bias=False, name= 'Block8_4_Branch_4_Conv2d_0b_1x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_4_Branch_4_Conv2d_0b_1x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_4_Branch_4_Conv2d_0b_1x3_Activation')(branch_1) + branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_4_Branch_4_Conv2d_0c_3x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_4_Branch_4_Conv2d_0c_3x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_4_Branch_4_Conv2d_0c_3x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block8_4_Concatenate')(branches) + up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_4_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.2})(up) + x = add([x, up]) + x = Activation('relu', name='Block8_4_Activation')(x) + + branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_5_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_5_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block8_5_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_5_Branch_5_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_5_Branch_5_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_5_Branch_5_Conv2d_0a_1x1_Activation')(branch_1) + branch_1 = Conv2D(192, [1, 3], strides=1, padding='same', use_bias=False, name= 'Block8_5_Branch_5_Conv2d_0b_1x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_5_Branch_5_Conv2d_0b_1x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_5_Branch_5_Conv2d_0b_1x3_Activation')(branch_1) + branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_5_Branch_5_Conv2d_0c_3x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_5_Branch_5_Conv2d_0c_3x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_5_Branch_5_Conv2d_0c_3x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block8_5_Concatenate')(branches) + up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_5_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 0.2})(up) + x = add([x, up]) + x = Activation('relu', name='Block8_5_Activation')(x) + + branch_0 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_6_Branch_0_Conv2d_1x1') (x) + branch_0 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_6_Branch_0_Conv2d_1x1_BatchNorm')(branch_0) + branch_0 = Activation('relu', name='Block8_6_Branch_0_Conv2d_1x1_Activation')(branch_0) + branch_1 = Conv2D(192, 1, strides=1, padding='same', use_bias=False, name= 'Block8_6_Branch_1_Conv2d_0a_1x1') (x) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_6_Branch_1_Conv2d_0a_1x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_6_Branch_1_Conv2d_0a_1x1_Activation')(branch_1) + 
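The Block17 and Block8 units above all repeat the same Inception-ResNet residual pattern: the branch outputs are concatenated, projected back to the trunk width with a 1x1 convolution, scaled by a small constant via the `scaling` Lambda (defined earlier in this module), and added back onto `x`. A minimal sketch of that pattern follows; the `scaling` helper shown here is an assumption about the one referenced above, and the function names are illustrative only.

```python
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Conv2D, Lambda, add


def scaling(x, scale):
    # Assumed to match the helper used above: element-wise multiply by a constant.
    return x * scale


def inception_resnet_residual(x, mixed, filters, scale, name):
    # Project the concatenated branches back to `filters` channels, scale them,
    # and add the result onto the trunk as a residual connection.
    up = Conv2D(filters, 1, strides=1, padding='same', use_bias=True, name=name)(mixed)
    up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': scale})(up)
    return add([x, up])
```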
branch_1 = Conv2D(192, [1, 3], strides=1, padding='same', use_bias=False, name= 'Block8_6_Branch_1_Conv2d_0b_1x3') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_6_Branch_1_Conv2d_0b_1x3_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_6_Branch_1_Conv2d_0b_1x3_Activation')(branch_1) + branch_1 = Conv2D(192, [3, 1], strides=1, padding='same', use_bias=False, name= 'Block8_6_Branch_1_Conv2d_0c_3x1') (branch_1) + branch_1 = BatchNormalization(axis=3, momentum=0.995, epsilon=0.001, scale=False, name='Block8_6_Branch_1_Conv2d_0c_3x1_BatchNorm')(branch_1) + branch_1 = Activation('relu', name='Block8_6_Branch_1_Conv2d_0c_3x1_Activation')(branch_1) + branches = [branch_0, branch_1] + mixed = Concatenate(axis=3, name='Block8_6_Concatenate')(branches) + up = Conv2D(1792, 1, strides=1, padding='same', use_bias=True, name= 'Block8_6_Conv2d_1x1') (mixed) + up = Lambda(scaling, output_shape=K.int_shape(up)[1:], arguments={'scale': 1})(up) + x = add([x, up]) + + # Classification block + x = GlobalAveragePooling2D(name='AvgPool')(x) + x = Dropout(1.0 - 0.8, name='Dropout')(x) + # Bottleneck + x = Dense(128, use_bias=False, name='Bottleneck')(x) + x = BatchNormalization(momentum=0.995, epsilon=0.001, scale=False, name='Bottleneck_BatchNorm')(x) + + # Create model + model = Model(inputs, x, name='inception_resnet_v1') + + return model + + +def loadModel(): + model = InceptionResNetV2() + weights = '/data/models/face/facenet_weights.h5' + + if os.path.isfile(weights) != True: + raise FileNotFoundError('Facenet weights does not exist') + + model.load_weights(weights) + + return model diff --git a/photonix/classifiers/face/deepface/basemodels/__init__.py b/photonix/classifiers/face/deepface/basemodels/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/photonix/classifiers/face/deepface/commons/__init__.py b/photonix/classifiers/face/deepface/commons/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/photonix/classifiers/face/deepface/commons/distance.py b/photonix/classifiers/face/deepface/commons/distance.py new file mode 100644 index 00000000..0e4dd786 --- /dev/null +++ b/photonix/classifiers/face/deepface/commons/distance.py @@ -0,0 +1,43 @@ +import numpy as np + + +def findCosineDistance(source_representation, test_representation): + a = np.matmul(np.transpose(source_representation), test_representation) + b = np.sum(np.multiply(source_representation, source_representation)) + c = np.sum(np.multiply(test_representation, test_representation)) + return 1 - (a / (np.sqrt(b) * np.sqrt(c))) + + +def findEuclideanDistance(source_representation, test_representation): + if type(source_representation) == list: + source_representation = np.array(source_representation) + + if type(test_representation) == list: + test_representation = np.array(test_representation) + + euclidean_distance = source_representation - test_representation + euclidean_distance = np.sum(np.multiply(euclidean_distance, euclidean_distance)) + euclidean_distance = np.sqrt(euclidean_distance) + return euclidean_distance + + +def l2_normalize(x): + return x / np.sqrt(np.sum(np.multiply(x, x))) + + +def findThreshold(model_name, distance_metric): + base_threshold = {'cosine': 0.40, 'euclidean': 0.55, 'euclidean_l2': 0.75} + + thresholds = { + 'VGG-Face': {'cosine': 0.40, 'euclidean': 0.55, 'euclidean_l2': 0.75}, + 'OpenFace': {'cosine': 0.10, 'euclidean': 0.55, 'euclidean_l2': 0.55}, + 'Facenet': {'cosine': 0.40, 'euclidean': 10, 
'euclidean_l2': 0.80}, + 'DeepFace': {'cosine': 0.23, 'euclidean': 64, 'euclidean_l2': 0.64}, + 'DeepID': {'cosine': 0.015, 'euclidean': 45, 'euclidean_l2': 0.17}, + 'Dlib': {'cosine': 0.07, 'euclidean': 0.6, 'euclidean_l2': 0.6}, + 'ArcFace': {'cosine': 0.6871912959056619, 'euclidean': 4.1591468986978075, 'euclidean_l2': 1.1315718048269017} + } + + threshold = thresholds.get(model_name, base_threshold).get(distance_metric, 0.4) + + return threshold diff --git a/photonix/classifiers/face/deepface/commons/functions.py b/photonix/classifiers/face/deepface/commons/functions.py new file mode 100644 index 00000000..71c56d21 --- /dev/null +++ b/photonix/classifiers/face/deepface/commons/functions.py @@ -0,0 +1,227 @@ +import base64 +import math +import os + +import cv2 +import numpy as np +from PIL import Image +from tensorflow.keras.preprocessing.image import load_img, save_img, img_to_array +from tensorflow.keras.applications.imagenet_utils import preprocess_input +from tensorflow.keras.preprocessing import image + +from photonix.classifiers.face.deepface.commons import distance +from photonix.classifiers.face.mtcnn import MTCNN # 0.1.0 + + +def initialize_input(img1_path, img2_path = None): + + if type(img1_path) == list: + bulkProcess = True + img_list = img1_path.copy() + else: + bulkProcess = False + + if ( + (type(img2_path) == str and img2_path != None) # exact image path, base64 image + or (isinstance(img2_path, np.ndarray) and img2_path.any()) # numpy array + ): + img_list = [[img1_path, img2_path]] + else: # analyze function passes just img1_path + img_list = [img1_path] + + return img_list, bulkProcess + + +def initialize_detector(detector_backend): + global face_detector + + if detector_backend == 'mtcnn': + face_detector = MTCNN() + + else: + raise ValueError('mtcnn is the only detector backend available') + + +def loadBase64Img(uri): + encoded_data = uri.split(',')[1] + nparr = np.fromstring(base64.b64decode(encoded_data), np.uint8) + img = cv2.imdecode(nparr, cv2.IMREAD_COLOR) + return img + + +def get_opencv_path(): + opencv_home = cv2.__file__ + folders = opencv_home.split(os.path.sep)[0:-1] + + path = folders[0] + for folder in folders[1:]: + path = path + "/" + folder + + return path+"/data/" + + +def load_image(img): + exact_image = False + if type(img).__module__ == np.__name__: + exact_image = True + + base64_img = False + if len(img) > 11 and img[0:11] == "data:image/": + base64_img = True + + if base64_img == True: + img = loadBase64Img(img) + + elif exact_image != True: # image path passed as input + if os.path.isfile(img) != True: + raise ValueError("Confirm that ", img, " exists") + + img = cv2.imread(img) + + return img + + +def detect_face(img, detector_backend = 'opencv', grayscale = False, enforce_detection = True): + img_region = [0, 0, img.shape[0], img.shape[1]] + + # if functions.preproces_face is called directly, then face_detector global variable might not been initialized. 
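A hypothetical call into the `detect_face` helper being defined here, just to illustrate the expected inputs and outputs; the image path is an example, and `enforce_detection=False` makes the call fall back to the whole frame when no face is found.

```python
import cv2

from photonix.classifiers.face.deepface.commons import functions

# Example path; any BGR image loaded with OpenCV will do.
img = cv2.imread('/data/photos/example.jpg')
face, region = functions.detect_face(
    img, detector_backend='mtcnn', enforce_detection=False)
print(region)  # [x, y, w, h] of the first detected face, or the full frame if none found
```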
+ if not "face_detector" in globals(): + initialize_detector(detector_backend = detector_backend) + + if detector_backend == 'mtcnn': + + img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # mtcnn expects RGB but OpenCV read BGR + detections = face_detector.detect_faces(img_rgb) + + if len(detections) > 0: + detection = detections[0] + x, y, w, h = detection["box"] + detected_face = img[int(y):int(y+h), int(x):int(x+w)] + return detected_face, [x, y, w, h] + + else: # if no face detected + if not enforce_detection: + return img, img_region + + else: + raise ValueError("Face could not be detected. Please confirm that the picture is a face photo or consider to set enforce_detection param to False.") + + else: + detectors = ['mtcnn'] + raise ValueError("Valid backends are ", detectors," but you passed ", detector_backend) + + +def alignment_procedure(img, left_eye, right_eye): + # this function aligns given face in img based on left and right eye coordinates + + left_eye_x, left_eye_y = left_eye + right_eye_x, right_eye_y = right_eye + + #----------------------- + # find rotation direction + + if left_eye_y > right_eye_y: + point_3rd = (right_eye_x, left_eye_y) + direction = -1 # rotate same direction to clock + else: + point_3rd = (left_eye_x, right_eye_y) + direction = 1 # rotate inverse direction of clock + + #----------------------- + # find length of triangle edges + + a = distance.findEuclideanDistance(np.array(left_eye), np.array(point_3rd)) + b = distance.findEuclideanDistance(np.array(right_eye), np.array(point_3rd)) + c = distance.findEuclideanDistance(np.array(right_eye), np.array(left_eye)) + + #----------------------- + + # apply cosine rule + + if b != 0 and c != 0: # this multiplication causes division by zero in cos_a calculation + + cos_a = (b*b + c*c - a*a)/(2*b*c) + angle = np.arccos(cos_a) # angle in radian + angle = (angle * 180) / math.pi # radian to degree + + #----------------------- + # rotate base image + + if direction == -1: + angle = 90 - angle + + img = Image.fromarray(img) + img = np.array(img.rotate(direction * angle)) + + #----------------------- + + return img # return img anyway + + +def align_face(img, detector_backend = 'mtcnn'): + if detector_backend == 'mtcnn': + + img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # mtcnn expects RGB but OpenCV read BGR + detections = face_detector.detect_faces(img_rgb) + + if len(detections) > 0: + detection = detections[0] + + keypoints = detection["keypoints"] + left_eye = keypoints["left_eye"] + right_eye = keypoints["right_eye"] + + img = alignment_procedure(img, left_eye, right_eye) + + return img # return img anyway + + +def preprocess_face(img, target_size=(224, 224), grayscale = False, enforce_detection = True, detector_backend = 'opencv', return_region = False): + img = load_image(img) + base_img = img.copy() + + img, region = detect_face(img = img, detector_backend = detector_backend, grayscale = grayscale, enforce_detection = enforce_detection) + + #-------------------------- + + if img.shape[0] > 0 and img.shape[1] > 0: + img = align_face(img = img, detector_backend = detector_backend) + else: + + if enforce_detection == True: + raise ValueError("Detected face shape is ", img.shape,". 
Consider to set enforce_detection argument to False.") + else: # restore base image + img = base_img.copy() + + #-------------------------- + + # post-processing + if grayscale == True: + img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) + + img = cv2.resize(img, target_size) + img_pixels = image.img_to_array(img) + img_pixels = np.expand_dims(img_pixels, axis = 0) + img_pixels /= 255 #normalize input in [0, 1] + + if return_region == True: + return img_pixels, region + else: + return img_pixels + + +def find_input_shape(model): + # face recognition models have different size of inputs + # my environment returns (None, 224, 224, 3) but some people mentioned that they got [(None, 224, 224, 3)]. I think this is because of version issue. + + input_shape = model.layers[0].input_shape + + if type(input_shape) == list: + input_shape = input_shape[0][1:3] + else: + input_shape = input_shape[1:3] + + if type(input_shape) == list: # issue 197: some people got array here instead of tuple + input_shape = tuple(input_shape) + + return input_shape diff --git a/photonix/classifiers/face/deepface/models/__init__.py b/photonix/classifiers/face/deepface/models/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/photonix/classifiers/face/model.py b/photonix/classifiers/face/model.py new file mode 100644 index 00000000..42be49f0 --- /dev/null +++ b/photonix/classifiers/face/model.py @@ -0,0 +1,315 @@ +from datetime import datetime +import json +import os +import sys +from pathlib import Path +from random import randint + +from annoy import AnnoyIndex +from django.utils import timezone +import numpy as np +from PIL import Image +from redis_lock import Lock + +from photonix.classifiers.base_model import BaseModel +from photonix.classifiers.face.deepface import DeepFace +from photonix.classifiers.face.mtcnn import MTCNN +from photonix.classifiers.face.deepface.commons.distance import findEuclideanDistance +from photonix.classifiers.face.deepface.DeepFace import build_model +from photonix.photos.utils.redis import redis_connection +from photonix.photos.utils.metadata import PhotoMetadata + + +GRAPH_FILE = os.path.join('face', 'mtcnn_weights.npy') +DISTANCE_THRESHOLD = 10 + + +class FaceModel(BaseModel): + name = 'face' + version = 20210528 + retrained_version = 0 + library_id = None + approx_ram_mb = 600 + max_num_workers = 1 + + def __init__(self, model_dir=None, graph_file=GRAPH_FILE, library_id=None, lock_name=None): + super().__init__(model_dir=model_dir) + self.library_id = library_id + + graph_file = os.path.join(self.model_dir, graph_file) + + if self.ensure_downloaded(lock_name=lock_name): + self.graph = self.load_graph(graph_file) + + + def load_graph(self, graph_file): + with Lock(redis_connection, 'classifier_{}_load_graph'.format(self.name)): + # Load MTCNN + mtcnn_graph = None + mtcnn_key = '{self.graph_cache_key}:mtcnn' + if mtcnn_key in self.graph_cache: + mtcnn_graph = self.graph_cache[mtcnn_key] + else: + mtcnn_graph = MTCNN(weights_file=graph_file) + self.graph_cache[mtcnn_key] = mtcnn_graph + + # Load Facenet + facenet_graph = None + facenet_key = '{self.graph_cache_key}:facenet' + if facenet_key in self.graph_cache: + facenet_graph = self.graph_cache[facenet_key] + else: + facenet_graph = build_model('Facenet') + self.graph_cache[facenet_key] = facenet_graph + + # Store version number of retrained model (ANN) if it has been computed + self.reload_retrained_model_version() + + return { + 'mtcnn': mtcnn_graph, + 'facenet': facenet_graph, + } + + def predict(self, image_file, 
min_score=0.99): + # Detects face bounding boxes + image = Image.open(image_file) + + if image.mode != 'RGB': + image = image.convert('RGB') + + # Perform rotations if decalared in metadata + metadata = PhotoMetadata(image_file) + if metadata.get('Orientation') in ['Rotate 90 CW', 'Rotate 270 CCW']: + image = image.rotate(-90, expand=True) + elif metadata.get('Orientation') in ['Rotate 90 CCW', 'Rotate 270 CW']: + image = image.rotate(90, expand=True) + + image = np.asarray(image) + results = self.graph['mtcnn'].detect_faces(image) + return list(filter(lambda f: f['confidence'] > min_score, results)) + + def crop(self, image_data, box): + return image_data.crop([ + max(box[0]-int(box[2]*0.3), 0), + max(box[1]-int(box[3]*0.3), 0), + min(box[0]+box[2]+int(box[2]*0.3), image_data.width), + min(box[1]+box[3]+int(box[3]*0.3), image_data.height) + ]) + + def get_face_embedding(self, image_data): + return DeepFace.represent(np.asarray(image_data), model_name='Facenet', model= self.graph['facenet']) + + def find_closest_face_tag_by_ann(self, source_embedding): + # Use ANN index to do quick serach if it has been trained by retrain_face_similarity_index + from django.conf import settings + ann_path = Path(settings.MODEL_DIR) / 'face' / f'{self.library_id}_faces.ann' + tag_ids_path = Path(settings.MODEL_DIR) / 'face' / f'{self.library_id}_faces_tag_ids.json' + + if os.path.exists(ann_path) and os.path.exists(tag_ids_path): + embedding_size = 128 # FaceNet output size + t = AnnoyIndex(embedding_size, 'euclidean') + # Ensure ANN index, tag IDs and version files can't be updated while we are reading + with Lock(redis_connection, 'face_model_retrain'): + self.reload_retrained_model_version() + t.load(str(ann_path)) + with open(tag_ids_path) as f: + tag_ids = json.loads(f.read()) + nearest = t.get_nns_by_vector(source_embedding, 1, include_distances=True) + if nearest[0]: + return tag_ids[nearest[0][0]], nearest[1][0] + + return (None, 999) + + def find_closest_face_tag_by_brute_force(self, source_embedding, oldest_date=None, target_data=None): + if not self.library_id and not target_data: + raise ValueError('No Library ID is set') + + representations = [] + if target_data: # Mainly as an option for testing + for id, embedding in target_data: + representations.append((id, embedding)) + else: + # Collect all previously generated embeddings + from photonix.photos.models import PhotoTag + photo_tags = PhotoTag.objects.filter(photo__library_id=self.library_id, tag__type='F') + if oldest_date: + photo_tags = photo_tags.filter(created_at__gt=oldest_date) + for photo_tag in photo_tags: + try: + tag_embedding = json.loads(photo_tag.extra_data)['facenet_embedding'] + representations.append((str(photo_tag.tag.id), tag_embedding)) + except (KeyError, json.decoder.JSONDecodeError): + pass + + # Calculate Euclidean distances + distances = [] + for (_, target_embedding) in representations: + distance = findEuclideanDistance(source_embedding, target_embedding) + distances.append(distance) + + # Return closest match and distance value + if not distances: # First face added has nothing to compare to + return (None, 999) + index = np.argmin(distances) + return (representations[index][0], distances[index]) + + def find_closest_face_tag(self, source_embedding): + if not self.library_id: + raise ValueError('No Library ID is set') + + ann_nearest, ann_distance = self.find_closest_face_tag_by_ann(source_embedding) + + oldest_date = None + if self.retrained_version: + oldest_date = datetime.strptime(str(self.retrained_version), 
'%Y%m%d%H%M%S').replace(tzinfo=timezone.utc) + + brute_force_nearest, brute_force_distance = self.find_closest_face_tag_by_brute_force(source_embedding, oldest_date=oldest_date) + + if ann_nearest and ann_distance < brute_force_distance: + return ann_nearest, ann_distance + else: + return brute_force_nearest, brute_force_distance + + def retrain_face_similarity_index(self, training_data=None): + if not self.library_id and not training_data: + raise ValueError('No Library ID is set') + + from django.conf import settings + from photonix.photos.models import PhotoTag + ann_path = Path(settings.MODEL_DIR) / 'face' / f'{self.library_id}_faces.ann' + tag_ids_path = Path(settings.MODEL_DIR) / 'face' / f'{self.library_id}_faces_tag_ids.json' + version_file = Path(settings.MODEL_DIR) / 'face' / f'{self.library_id}_retrained_version.txt' + + embedding_size = 128 # FaceNet output size + t = AnnoyIndex(embedding_size, 'euclidean') + retrained_version = datetime.utcnow().strftime('%Y%m%d%H%M%S') + + tag_ids = [] + if training_data: # Mainly as an option for testing + for id, embedding in training_data: + t.add_item(len(tag_ids), embedding) + tag_ids.append(id) + else: + for photo_tag in PhotoTag.objects.filter(tag__type='F').order_by('id'): + try: + extra_data = json.loads(photo_tag.extra_data) + embedding = extra_data['facenet_embedding'] + t.add_item(len(tag_ids), embedding) + tag_ids.append(str(photo_tag.tag.id)) + except (json.decoder.JSONDecodeError, KeyError, TypeError): + pass + + # Build the ANN index + t.build(3) # Number of random forest trees + + # Aquire lock to save ANN, tag IDs and version files atomically + with Lock(redis_connection, 'face_model_retrain'): + # Save ANN index + t.save(str(ann_path)) + + # Save Tag IDs to JSON file as Annoy only supports integer IDs so we have to do the mapping ourselves + with open(tag_ids_path, 'w') as f: + f.write(json.dumps(tag_ids)) + + # Save version of retrained model to text file - used to save against on PhotoTag model and to determine whether retraining is required + with open(version_file, 'w') as f: + f.write(retrained_version) + + def reload_retrained_model_version(self): + if self.library_id: + from django.conf import settings + version_file = Path(settings.MODEL_DIR) / 'face' / f'{self.library_id}_retrained_version.txt' + version_date = None + if os.path.exists(version_file): + with open(version_file) as f: + contents = f.read().strip() + version_date = datetime.strptime(contents, '%Y%m%d%H%M%S').replace(tzinfo=timezone.utc) + self.retrained_version = int(version_date.strftime('%Y%m%d%H%M%S')) + return self.retrained_version + return 0 + + +def run_on_photo(photo_id): + sys.path.insert(0, str(Path(__file__).resolve().parent.parent)) + from photonix.classifiers.runners import get_photo_by_any_type, results_for_model_on_photo, get_or_create_tag + + photo = get_photo_by_any_type(photo_id) + model = FaceModel(library_id=photo and photo.library_id) + + # Detect all faces in an image + photo, results = results_for_model_on_photo(model, photo_id) + + # Read image data so we can extract faces and create embeddings + path = photo_id + if photo: + path = photo.base_image_path + model.library_id = photo.library_id + image_data = Image.open(path) + + # Loop over each face that was detected above + for result in results: + # Crop individual face + 30% extra in each direction + box = result['box'] + face_image = model.crop(image_data, box) + # Generate embedding with Facenet + try: + embedding = model.get_face_embedding(face_image) + # Add it to the 
results + result['embedding'] = embedding + if photo: + closest_tag, closest_distance = model.find_closest_face_tag(embedding) + if closest_tag: + print(f'Closest tag: {closest_tag}') + print(f'Closest distance: {closest_distance}') + result['closest_tag'] = closest_tag + result['closest_distance'] = closest_distance + except ValueError: + pass + + if photo: + from django.utils import timezone + from photonix.photos.models import Tag, PhotoTag + + photo.clear_tags(source='C', type='F') + for result in results: + # Use matched tag if within distance threshold + if result.get('closest_distance', 999) < DISTANCE_THRESHOLD: + tag = Tag.objects.get(id=result['closest_tag'], library=photo.library, type='F') + print(f'MATCHED {tag.name}') + + # Otherwise create new tag + else: + while True: + random_name = f'Unknown person {randint(0, 999999):06d}' + try: + Tag.objects.get(library=photo.library, name=random_name, type='F', source='C') + except Tag.DoesNotExist: + tag = Tag(library=photo.library, name=random_name, type='F', source='C') + tag.save() + break + + x = (result['box'][0] + (result['box'][2] / 2)) / photo.base_file.width + y = (result['box'][1] + (result['box'][3] / 2)) / photo.base_file.height + width = result['box'][2] / photo.base_file.width + height = result['box'][3] / photo.base_file.height + score = result['confidence'] + + extra_data = '' + if 'embedding' in result: + extra_data = json.dumps({'facenet_embedding': result['embedding']}) + + PhotoTag(photo=photo, tag=tag, source='F', confidence=score, significance=score, position_x=x, position_y=y, size_x=width, size_y=height, model_version=model.version, retrained_model_version=model.retrained_version, extra_data=extra_data).save() + photo.classifier_color_completed_at = timezone.now() + photo.classifier_color_version = getattr(model, 'version', 0) + photo.save() + + return photo, results + + +if __name__ == '__main__': + if len(sys.argv) != 2: + print('Argument required: image file path or Photo ID') + exit(1) + + _, results = run_on_photo(sys.argv[1]) + print(results) diff --git a/photonix/classifiers/face/mtcnn/__init__.py b/photonix/classifiers/face/mtcnn/__init__.py new file mode 100644 index 00000000..80f5d4cd --- /dev/null +++ b/photonix/classifiers/face/mtcnn/__init__.py @@ -0,0 +1,30 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# MIT License +# +# Copyright (c) 2019 Iván de Paz Centeno +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
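For reference, a sketch of how the face classifier defined above can be exercised end to end. `run_on_photo()` accepts either a `Photo` ID from the database or a plain image path; the path below is only an example.

```python
from photonix.classifiers.face.model import run_on_photo

photo, results = run_on_photo('/data/photos/example.jpg')
for face in results:
    # Each result is an MTCNN detection; when a Photo record is involved it may also
    # carry 'embedding', 'closest_tag' and 'closest_distance' as set above.
    print(face['box'], face['confidence'])
```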
+ +from .mtcnn import MTCNN + + +__author__ = "Iván de Paz Centeno" +__version__= "0.1.0" diff --git a/photonix/classifiers/face/mtcnn/exceptions/__init__.py b/photonix/classifiers/face/mtcnn/exceptions/__init__.py new file mode 100644 index 00000000..efe2ac45 --- /dev/null +++ b/photonix/classifiers/face/mtcnn/exceptions/__init__.py @@ -0,0 +1,26 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# MIT License +# +# Copyright (c) 2019 Iván de Paz Centeno +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from .invalid_image import InvalidImage diff --git a/photonix/classifiers/face/mtcnn/exceptions/invalid_image.py b/photonix/classifiers/face/mtcnn/exceptions/invalid_image.py new file mode 100755 index 00000000..ecfe9bc5 --- /dev/null +++ b/photonix/classifiers/face/mtcnn/exceptions/invalid_image.py @@ -0,0 +1,30 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# MIT License +# +# Copyright (c) 2019 Iván de Paz Centeno +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
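Stepping back to `FaceModel` above: `retrain_face_similarity_index()` writes an Annoy index of 128-dimensional FaceNet embeddings that `find_closest_face_tag_by_ann()` later queries. A minimal standalone sketch of that build/query cycle, using dummy vectors for illustration only:

```python
import random

from annoy import AnnoyIndex

embedding_size = 128  # FaceNet embedding length used throughout the patch
index = AnnoyIndex(embedding_size, 'euclidean')

# One item per known face tag; the real code stores each PhotoTag's facenet_embedding.
for item_id in range(10):
    index.add_item(item_id, [random.random() for _ in range(embedding_size)])

index.build(3)  # 3 random-projection trees, as in retrain_face_similarity_index()

query = [random.random() for _ in range(embedding_size)]
ids, distances = index.get_nns_by_vector(query, 1, include_distances=True)
print(ids[0], distances[0])  # nearest stored face and its Euclidean distance
```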
+ + +__author__ = "Iván de Paz Centeno" + +class InvalidImage(Exception): + pass diff --git a/photonix/classifiers/face/mtcnn/layer_factory.py b/photonix/classifiers/face/mtcnn/layer_factory.py new file mode 100644 index 00000000..89c39d59 --- /dev/null +++ b/photonix/classifiers/face/mtcnn/layer_factory.py @@ -0,0 +1,227 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +#MIT License +# +#Copyright (c) 2018 Iván de Paz Centeno +# +#Permission is hereby granted, free of charge, to any person obtaining a copy +#of this software and associated documentation files (the "Software"), to deal +#in the Software without restriction, including without limitation the rights +#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +#copies of the Software, and to permit persons to whom the Software is +#furnished to do so, subject to the following conditions: +# +#The above copyright notice and this permission notice shall be included in all +#copies or substantial portions of the Software. +# +#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +#SOFTWARE. + +import tensorflow as tf +from distutils.version import LooseVersion + +__author__ = "Iván de Paz Centeno" + + +class LayerFactory(object): + """ + Allows to create stack layers for a given network. + """ + + AVAILABLE_PADDINGS = ('SAME', 'VALID') + + def __init__(self, network): + self.__network = network + + @staticmethod + def __validate_padding(padding): + if padding not in LayerFactory.AVAILABLE_PADDINGS: + raise Exception("Padding {} not valid".format(padding)) + + @staticmethod + def __validate_grouping(channels_input: int, channels_output: int, group: int): + if channels_input % group != 0: + raise Exception("The number of channels in the input does not match the group") + + if channels_output % group != 0: + raise Exception("The number of channels in the output does not match the group") + + @staticmethod + def vectorize_input(input_layer): + input_shape = input_layer.get_shape() + + if input_shape.ndims == 4: + # Spatial input, must be vectorized. + dim = 1 + for x in input_shape[1:].as_list(): + dim *= int(x) + + #dim = operator.mul(*(input_shape[1:].as_list())) + vectorized_input = tf.reshape(input_layer, [-1, dim]) + else: + vectorized_input, dim = (input_layer, input_shape[-1]) + + return vectorized_input, dim + + def __make_var(self, name: str, shape: list): + """ + Creates a tensorflow variable with the given name and shape. + :param name: name to set for the variable. + :param shape: list defining the shape of the variable. + :return: created TF variable. + """ + return tf.compat.v1.get_variable(name, shape, trainable=self.__network.is_trainable(), + use_resource=False) + + def new_feed(self, name: str, layer_shape: tuple): + """ + Creates a feed layer. This is usually the first layer in the network. 
+ :param name: name of the layer + :return: + """ + + feed_data = tf.compat.v1.placeholder(tf.float32, layer_shape, 'input') + self.__network.add_layer(name, layer_output=feed_data) + + def new_conv(self, name: str, kernel_size: tuple, channels_output: int, + stride_size: tuple, padding: str='SAME', + group: int=1, biased: bool=True, relu: bool=True, input_layer_name: str=None): + """ + Creates a convolution layer for the network. + :param name: name for the layer + :param kernel_size: tuple containing the size of the kernel (Width, Height) + :param channels_output: ¿? Perhaps number of channels in the output? it is used as the bias size. + :param stride_size: tuple containing the size of the stride (Width, Height) + :param padding: Type of padding. Available values are: ('SAME', 'VALID') + :param group: groups for the kernel operation. More info required. + :param biased: boolean flag to set if biased or not. + :param relu: boolean flag to set if ReLu should be applied at the end of the layer or not. + :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of + the network. + """ + + # Verify that the padding is acceptable + self.__validate_padding(padding) + + input_layer = self.__network.get_layer(input_layer_name) + + # Get the number of channels in the input + channels_input = int(input_layer.get_shape()[-1]) + + # Verify that the grouping parameter is valid + self.__validate_grouping(channels_input, channels_output, group) + + # Convolution for a given input and kernel + convolve = lambda input_val, kernel: tf.nn.conv2d(input=input_val, + filters=kernel, + strides=[1, stride_size[1], stride_size[0], 1], + padding=padding) + + with tf.compat.v1.variable_scope(name) as scope: + kernel = self.__make_var('weights', shape=[kernel_size[1], kernel_size[0], channels_input // group, channels_output]) + + output = convolve(input_layer, kernel) + + # Add the biases, if required + if biased: + biases = self.__make_var('biases', [channels_output]) + output = tf.nn.bias_add(output, biases) + + # Apply ReLU non-linearity, if required + if relu: + output = tf.nn.relu(output, name=scope.name) + + + self.__network.add_layer(name, layer_output=output) + + def new_prelu(self, name: str, input_layer_name: str=None): + """ + Creates a new prelu layer with the given name and input. + :param name: name for this layer. + :param input_layer_name: name of the layer that serves as input for this one. + """ + input_layer = self.__network.get_layer(input_layer_name) + + with tf.compat.v1.variable_scope(name): + channels_input = int(input_layer.get_shape()[-1]) + alpha = self.__make_var('alpha', shape=[channels_input]) + output = tf.nn.relu(input_layer) + tf.multiply(alpha, -tf.nn.relu(-input_layer)) + + self.__network.add_layer(name, layer_output=output) + + def new_max_pool(self, name:str, kernel_size: tuple, stride_size: tuple, padding='SAME', + input_layer_name: str=None): + """ + Creates a new max pooling layer. + :param name: name for the layer. + :param kernel_size: tuple containing the size of the kernel (Width, Height) + :param stride_size: tuple containing the size of the stride (Width, Height) + :param padding: Type of padding. Available values are: ('SAME', 'VALID') + :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of + the network. 
+ """ + + self.__validate_padding(padding) + + input_layer = self.__network.get_layer(input_layer_name) + + output = tf.nn.max_pool2d(input=input_layer, + ksize=[1, kernel_size[1], kernel_size[0], 1], + strides=[1, stride_size[1], stride_size[0], 1], + padding=padding, + name=name) + + self.__network.add_layer(name, layer_output=output) + + def new_fully_connected(self, name: str, output_count: int, relu=True, input_layer_name: str=None): + """ + Creates a new fully connected layer. + + :param name: name for the layer. + :param output_count: number of outputs of the fully connected layer. + :param relu: boolean flag to set if ReLu should be applied at the end of this layer. + :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of + the network. + """ + + with tf.compat.v1.variable_scope(name): + input_layer = self.__network.get_layer(input_layer_name) + vectorized_input, dimension = self.vectorize_input(input_layer) + + weights = self.__make_var('weights', shape=[dimension, output_count]) + biases = self.__make_var('biases', shape=[output_count]) + operation = tf.compat.v1.nn.relu_layer if relu else tf.compat.v1.nn.xw_plus_b + + fc = operation(vectorized_input, weights, biases, name=name) + + self.__network.add_layer(name, layer_output=fc) + + def new_softmax(self, name, axis, input_layer_name: str=None): + """ + Creates a new softmax layer + :param name: name to set for the layer + :param axis: + :param input_layer_name: name of the input layer for this layer. If None, it will take the last added layer of + the network. + """ + input_layer = self.__network.get_layer(input_layer_name) + + if LooseVersion(tf.__version__) < LooseVersion("1.5.0"): + max_axis = tf.reduce_max(input_tensor=input_layer, axis=axis, keepdims=True) + target_exp = tf.exp(input_layer - max_axis) + normalize = tf.reduce_sum(input_tensor=target_exp, axis=axis, keepdims=True) + else: + max_axis = tf.reduce_max(input_tensor=input_layer, axis=axis, keepdims=True) + target_exp = tf.exp(input_layer - max_axis) + normalize = tf.reduce_sum(input_tensor=target_exp, axis=axis, keepdims=True) + + softmax = tf.math.divide(target_exp, normalize, name) + + self.__network.add_layer(name, layer_output=softmax) + diff --git a/photonix/classifiers/face/mtcnn/mtcnn.py b/photonix/classifiers/face/mtcnn/mtcnn.py new file mode 100644 index 00000000..731d5cab --- /dev/null +++ b/photonix/classifiers/face/mtcnn/mtcnn.py @@ -0,0 +1,500 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# MIT License +# +# Copyright (c) 2019 Iván de Paz Centeno +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# +# This code is derived from the MTCNN implementation of David Sandberg for Facenet +# (https://github.com/davidsandberg/facenet/) +# It has been rebuilt from scratch, taking the David Sandberg's implementation as a reference. +# + +# import cv2 +import numpy as np +from PIL import Image +import pkg_resources + +from .exceptions import InvalidImage +from .network.factory import NetworkFactory + +__author__ = "Iván de Paz Centeno" + + +class StageStatus(object): + """ + Keeps status between MTCNN stages + """ + + def __init__(self, pad_result: tuple = None, width=0, height=0): + self.width = width + self.height = height + self.dy = self.edy = self.dx = self.edx = self.y = self.ey = self.x = self.ex = self.tmpw = self.tmph = [] + + if pad_result is not None: + self.update(pad_result) + + def update(self, pad_result: tuple): + s = self + s.dy, s.edy, s.dx, s.edx, s.y, s.ey, s.x, s.ex, s.tmpw, s.tmph = pad_result + + +class MTCNN(object): + """ + Allows to perform MTCNN Detection -> + a) Detection of faces (with the confidence probability) + b) Detection of keypoints (left eye, right eye, nose, mouth_left, mouth_right) + """ + + def __init__(self, weights_file: str = None, min_face_size: int = 20, steps_threshold: list = None, + scale_factor: float = 0.709): + """ + Initializes the MTCNN. + :param weights_file: file uri with the weights of the P, R and O networks from MTCNN. By default it will load + the ones bundled with the package. + :param min_face_size: minimum size of the face to detect + :param steps_threshold: step's thresholds values + :param scale_factor: scale factor + """ + if steps_threshold is None: + steps_threshold = [0.6, 0.7, 0.7] + + if weights_file is None: + weights_file = '/data/models/face/mtcnn_weights.npy' + + self._min_face_size = min_face_size + self._steps_threshold = steps_threshold + self._scale_factor = scale_factor + + self._pnet, self._rnet, self._onet = NetworkFactory().build_P_R_O_nets_from_file(weights_file) + + @property + def min_face_size(self): + return self._min_face_size + + @min_face_size.setter + def min_face_size(self, mfc=20): + try: + self._min_face_size = int(mfc) + except ValueError: + self._min_face_size = 20 + + def __compute_scale_pyramid(self, m, min_layer): + scales = [] + factor_count = 0 + + while min_layer >= 12: + scales += [m * np.power(self._scale_factor, factor_count)] + min_layer = min_layer * self._scale_factor + factor_count += 1 + + return scales + + @staticmethod + def __scale_image(image, scale: float): + """ + Scales the image to a given scale. 
+ :param image: + :param scale: + :return: + """ + height, width, _ = image.shape + + width_scaled = int(np.ceil(width * scale)) + height_scaled = int(np.ceil(height * scale)) + + # im_data = cv2.resize(image, (width_scaled, height_scaled), interpolation = cv2.INTER_AREA) + im_data = Image.fromarray(image).resize((width_scaled, height_scaled), Image.BICUBIC) + im_data = np.asarray(im_data) + + # Normalize the image's pixels + im_data_normalized = (im_data - 127.5) * 0.0078125 + + return im_data_normalized + + @staticmethod + def __generate_bounding_box(imap, reg, scale, t): + + # use heatmap to generate bounding boxes + stride = 2 + cellsize = 12 + + imap = np.transpose(imap) + dx1 = np.transpose(reg[:, :, 0]) + dy1 = np.transpose(reg[:, :, 1]) + dx2 = np.transpose(reg[:, :, 2]) + dy2 = np.transpose(reg[:, :, 3]) + + y, x = np.where(imap >= t) + + if y.shape[0] == 1: + dx1 = np.flipud(dx1) + dy1 = np.flipud(dy1) + dx2 = np.flipud(dx2) + dy2 = np.flipud(dy2) + + score = imap[(y, x)] + reg = np.transpose(np.vstack([dx1[(y, x)], dy1[(y, x)], dx2[(y, x)], dy2[(y, x)]])) + + if reg.size == 0: + reg = np.empty(shape=(0, 3)) + + bb = np.transpose(np.vstack([y, x])) + + q1 = np.fix((stride * bb + 1) / scale) + q2 = np.fix((stride * bb + cellsize) / scale) + boundingbox = np.hstack([q1, q2, np.expand_dims(score, 1), reg]) + + return boundingbox, reg + + @staticmethod + def __nms(boxes, threshold, method): + """ + Non Maximum Suppression. + + :param boxes: np array with bounding boxes. + :param threshold: + :param method: NMS method to apply. Available values ('Min', 'Union') + :return: + """ + if boxes.size == 0: + return np.empty((0, 3)) + + x1 = boxes[:, 0] + y1 = boxes[:, 1] + x2 = boxes[:, 2] + y2 = boxes[:, 3] + s = boxes[:, 4] + + area = (x2 - x1 + 1) * (y2 - y1 + 1) + sorted_s = np.argsort(s) + + pick = np.zeros_like(s, dtype=np.int16) + counter = 0 + while sorted_s.size > 0: + i = sorted_s[-1] + pick[counter] = i + counter += 1 + idx = sorted_s[0:-1] + + xx1 = np.maximum(x1[i], x1[idx]) + yy1 = np.maximum(y1[i], y1[idx]) + xx2 = np.minimum(x2[i], x2[idx]) + yy2 = np.minimum(y2[i], y2[idx]) + + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + + inter = w * h + + if method == 'Min': + o = inter / np.minimum(area[i], area[idx]) + else: + o = inter / (area[i] + area[idx] - inter) + + sorted_s = sorted_s[np.where(o <= threshold)] + + pick = pick[0:counter] + + return pick + + @staticmethod + def __pad(total_boxes, w, h): + # compute the padding coordinates (pad the bounding boxes to square) + tmpw = (total_boxes[:, 2] - total_boxes[:, 0] + 1).astype(np.int32) + tmph = (total_boxes[:, 3] - total_boxes[:, 1] + 1).astype(np.int32) + numbox = total_boxes.shape[0] + + dx = np.ones(numbox, dtype=np.int32) + dy = np.ones(numbox, dtype=np.int32) + edx = tmpw.copy().astype(np.int32) + edy = tmph.copy().astype(np.int32) + + x = total_boxes[:, 0].copy().astype(np.int32) + y = total_boxes[:, 1].copy().astype(np.int32) + ex = total_boxes[:, 2].copy().astype(np.int32) + ey = total_boxes[:, 3].copy().astype(np.int32) + + tmp = np.where(ex > w) + edx.flat[tmp] = np.expand_dims(-ex[tmp] + w + tmpw[tmp], 1) + ex[tmp] = w + + tmp = np.where(ey > h) + edy.flat[tmp] = np.expand_dims(-ey[tmp] + h + tmph[tmp], 1) + ey[tmp] = h + + tmp = np.where(x < 1) + dx.flat[tmp] = np.expand_dims(2 - x[tmp], 1) + x[tmp] = 1 + + tmp = np.where(y < 1) + dy.flat[tmp] = np.expand_dims(2 - y[tmp], 1) + y[tmp] = 1 + + return dy, edy, dx, edx, y, ey, x, ex, tmpw, tmph + + @staticmethod + def __rerec(bbox): + # 
convert bbox to square + height = bbox[:, 3] - bbox[:, 1] + width = bbox[:, 2] - bbox[:, 0] + max_side_length = np.maximum(width, height) + bbox[:, 0] = bbox[:, 0] + width * 0.5 - max_side_length * 0.5 + bbox[:, 1] = bbox[:, 1] + height * 0.5 - max_side_length * 0.5 + bbox[:, 2:4] = bbox[:, 0:2] + np.transpose(np.tile(max_side_length, (2, 1))) + return bbox + + @staticmethod + def __bbreg(boundingbox, reg): + # calibrate bounding boxes + if reg.shape[1] == 1: + reg = np.reshape(reg, (reg.shape[2], reg.shape[3])) + + w = boundingbox[:, 2] - boundingbox[:, 0] + 1 + h = boundingbox[:, 3] - boundingbox[:, 1] + 1 + b1 = boundingbox[:, 0] + reg[:, 0] * w + b2 = boundingbox[:, 1] + reg[:, 1] * h + b3 = boundingbox[:, 2] + reg[:, 2] * w + b4 = boundingbox[:, 3] + reg[:, 3] * h + boundingbox[:, 0:4] = np.transpose(np.vstack([b1, b2, b3, b4])) + return boundingbox + + def detect_faces(self, img) -> list: + """ + Detects bounding boxes from the specified image. + :param img: image to process + :return: list containing all the bounding boxes detected with their keypoints. + """ + if img is None or not hasattr(img, "shape"): + raise InvalidImage("Image not valid.") + + height, width, _ = img.shape + stage_status = StageStatus(width=width, height=height) + + m = 12 / self._min_face_size + min_layer = np.amin([height, width]) * m + + scales = self.__compute_scale_pyramid(m, min_layer) + + stages = [self.__stage1, self.__stage2, self.__stage3] + result = [scales, stage_status] + + # We pipe here each of the stages + for stage in stages: + result = stage(img, result[0], result[1]) + + [total_boxes, points] = result + + bounding_boxes = [] + + for bounding_box, keypoints in zip(total_boxes, points.T): + x = max(0, int(bounding_box[0])) + y = max(0, int(bounding_box[1])) + width = int(bounding_box[2] - x) + height = int(bounding_box[3] - y) + bounding_boxes.append({ + 'box': [x, y, width, height], + 'confidence': bounding_box[-1], + 'keypoints': { + 'left_eye': (int(keypoints[0]), int(keypoints[5])), + 'right_eye': (int(keypoints[1]), int(keypoints[6])), + 'nose': (int(keypoints[2]), int(keypoints[7])), + 'mouth_left': (int(keypoints[3]), int(keypoints[8])), + 'mouth_right': (int(keypoints[4]), int(keypoints[9])), + } + }) + + return bounding_boxes + + def __stage1(self, image, scales: list, stage_status: StageStatus): + """ + First stage of the MTCNN. 
+ :param image: + :param scales: + :param stage_status: + :return: + """ + total_boxes = np.empty((0, 9)) + status = stage_status + + for scale in scales: + scaled_image = self.__scale_image(image, scale) + + img_x = np.expand_dims(scaled_image, 0) + img_y = np.transpose(img_x, (0, 2, 1, 3)) + + out = self._pnet.predict(img_y) + + out0 = np.transpose(out[0], (0, 2, 1, 3)) + out1 = np.transpose(out[1], (0, 2, 1, 3)) + + boxes, _ = self.__generate_bounding_box(out1[0, :, :, 1].copy(), + out0[0, :, :, :].copy(), scale, self._steps_threshold[0]) + + # inter-scale nms + pick = self.__nms(boxes.copy(), 0.5, 'Union') + if boxes.size > 0 and pick.size > 0: + boxes = boxes[pick, :] + total_boxes = np.append(total_boxes, boxes, axis=0) + + numboxes = total_boxes.shape[0] + + if numboxes > 0: + pick = self.__nms(total_boxes.copy(), 0.7, 'Union') + total_boxes = total_boxes[pick, :] + + regw = total_boxes[:, 2] - total_boxes[:, 0] + regh = total_boxes[:, 3] - total_boxes[:, 1] + + qq1 = total_boxes[:, 0] + total_boxes[:, 5] * regw + qq2 = total_boxes[:, 1] + total_boxes[:, 6] * regh + qq3 = total_boxes[:, 2] + total_boxes[:, 7] * regw + qq4 = total_boxes[:, 3] + total_boxes[:, 8] * regh + + total_boxes = np.transpose(np.vstack([qq1, qq2, qq3, qq4, total_boxes[:, 4]])) + total_boxes = self.__rerec(total_boxes.copy()) + + total_boxes[:, 0:4] = np.fix(total_boxes[:, 0:4]).astype(np.int32) + status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), + width=stage_status.width, height=stage_status.height) + + return total_boxes, status + + def __stage2(self, img, total_boxes, stage_status: StageStatus): + """ + Second stage of the MTCNN. + :param img: + :param total_boxes: + :param stage_status: + :return: + """ + + num_boxes = total_boxes.shape[0] + if num_boxes == 0: + return total_boxes, stage_status + + # second stage + tempimg = np.zeros(shape=(24, 24, 3, num_boxes)) + + for k in range(0, num_boxes): + tmp = np.zeros((int(stage_status.tmph[k]), int(stage_status.tmpw[k]), 3)) + + tmp[stage_status.dy[k] - 1:stage_status.edy[k], stage_status.dx[k] - 1:stage_status.edx[k], :] = \ + img[stage_status.y[k] - 1:stage_status.ey[k], stage_status.x[k] - 1:stage_status.ex[k], :] + + if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: + # tempimg[:,:,:, k] = cv2.resize(tmp, (24, 24), interpolation=cv2.INTER_AREA) + tempimg[:, :, :, k] = np.asarray(Image.fromarray(np.uint8(tmp)).resize((24, 24), Image.BICUBIC)) + + else: + return np.empty(shape=(0,)), stage_status + + tempimg = (tempimg - 127.5) * 0.0078125 + tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) + + out = self._rnet.predict(tempimg1) + + out0 = np.transpose(out[0]) + out1 = np.transpose(out[1]) + + score = out1[1, :] + + ipass = np.where(score > self._steps_threshold[1]) + + total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) + + mv = out0[:, ipass[0]] + + if total_boxes.shape[0] > 0: + pick = self.__nms(total_boxes, 0.7, 'Union') + total_boxes = total_boxes[pick, :] + total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv[:, pick])) + total_boxes = self.__rerec(total_boxes.copy()) + + return total_boxes, stage_status + + def __stage3(self, img, total_boxes, stage_status: StageStatus): + """ + Third stage of the MTCNN. 
+ + :param img: + :param total_boxes: + :param stage_status: + :return: + """ + num_boxes = total_boxes.shape[0] + if num_boxes == 0: + return total_boxes, np.empty(shape=(0,)) + + total_boxes = np.fix(total_boxes).astype(np.int32) + + status = StageStatus(self.__pad(total_boxes.copy(), stage_status.width, stage_status.height), + width=stage_status.width, height=stage_status.height) + + tempimg = np.zeros((48, 48, 3, num_boxes)) + + for k in range(0, num_boxes): + + tmp = np.zeros((int(status.tmph[k]), int(status.tmpw[k]), 3)) + + tmp[status.dy[k] - 1:status.edy[k], status.dx[k] - 1:status.edx[k], :] = \ + img[status.y[k] - 1:status.ey[k], status.x[k] - 1:status.ex[k], :] + + if tmp.shape[0] > 0 and tmp.shape[1] > 0 or tmp.shape[0] == 0 and tmp.shape[1] == 0: + # tempimg[:,:,:, k] = cv2.resize(tmp, (48, 48), interpolation=cv2.INTER_AREA) + tempimg[:, :, :, k] = np.asarray(Image.fromarray(np.uint8(tmp)).resize((48, 48), Image.BICUBIC)) + else: + return np.empty(shape=(0,)), np.empty(shape=(0,)) + + tempimg = (tempimg - 127.5) * 0.0078125 + tempimg1 = np.transpose(tempimg, (3, 1, 0, 2)) + + out = self._onet.predict(tempimg1) + out0 = np.transpose(out[0]) + out1 = np.transpose(out[1]) + out2 = np.transpose(out[2]) + + score = out2[1, :] + + points = out1 + + ipass = np.where(score > self._steps_threshold[2]) + + points = points[:, ipass[0]] + + total_boxes = np.hstack([total_boxes[ipass[0], 0:4].copy(), np.expand_dims(score[ipass].copy(), 1)]) + + mv = out0[:, ipass[0]] + + w = total_boxes[:, 2] - total_boxes[:, 0] + 1 + h = total_boxes[:, 3] - total_boxes[:, 1] + 1 + + points[0:5, :] = np.tile(w, (5, 1)) * points[0:5, :] + np.tile(total_boxes[:, 0], (5, 1)) - 1 + points[5:10, :] = np.tile(h, (5, 1)) * points[5:10, :] + np.tile(total_boxes[:, 1], (5, 1)) - 1 + + if total_boxes.shape[0] > 0: + total_boxes = self.__bbreg(total_boxes.copy(), np.transpose(mv)) + pick = self.__nms(total_boxes.copy(), 0.7, 'Min') + total_boxes = total_boxes[pick, :] + points = points[:, pick] + + return total_boxes, points diff --git a/photonix/classifiers/face/mtcnn/network.py b/photonix/classifiers/face/mtcnn/network.py new file mode 100644 index 00000000..7c5f3148 --- /dev/null +++ b/photonix/classifiers/face/mtcnn/network.py @@ -0,0 +1,111 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +#MIT License +# +#Copyright (c) 2018 Iván de Paz Centeno +# +#Permission is hereby granted, free of charge, to any person obtaining a copy +#of this software and associated documentation files (the "Software"), to deal +#in the Software without restriction, including without limitation the rights +#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +#copies of the Software, and to permit persons to whom the Software is +#furnished to do so, subject to the following conditions: +# +#The above copyright notice and this permission notice shall be included in all +#copies or substantial portions of the Software. +# +#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +#SOFTWARE. 
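A small usage sketch for the bundled MTCNN detector defined above. The weights path matches the default used in the patch; the photo path is an example. `detect_faces()` expects an RGB array and returns one dict per face.

```python
import numpy as np
from PIL import Image

from photonix.classifiers.face.mtcnn import MTCNN

detector = MTCNN(weights_file='/data/models/face/mtcnn_weights.npy')
image = np.asarray(Image.open('/data/photos/example.jpg').convert('RGB'))

for face in detector.detect_faces(image):
    # Each detection carries a bounding box, a confidence score and five keypoints.
    print(face['box'], face['confidence'], face['keypoints']['left_eye'])
```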
+ +import tensorflow as tf + +__author__ = "Iván de Paz Centeno" + + +class Network(object): + + def __init__(self, session, trainable: bool=True): + """ + Initializes the network. + :param trainable: flag to determine if this network should be trainable or not. + """ + self._session = session + self.__trainable = trainable + self.__layers = {} + self.__last_layer_name = None + + with tf.compat.v1.variable_scope(self.__class__.__name__.lower()): + self._config() + + def _config(self): + """ + Configures the network layers. + It is usually done using the LayerFactory() class. + """ + raise NotImplementedError("This method must be implemented by the network.") + + def add_layer(self, name: str, layer_output): + """ + Adds a layer to the network. + :param name: name of the layer to add + :param layer_output: output layer. + """ + self.__layers[name] = layer_output + self.__last_layer_name = name + + def get_layer(self, name: str=None): + """ + Retrieves the layer by its name. + :param name: name of the layer to retrieve. If name is None, it will retrieve the last added layer to the + network. + :return: layer output + """ + if name is None: + name = self.__last_layer_name + + return self.__layers[name] + + def is_trainable(self): + """ + Getter for the trainable flag. + """ + return self.__trainable + + def set_weights(self, weights_values: dict, ignore_missing=False): + """ + Sets the weights values of the network. + :param weights_values: dictionary with weights for each layer + """ + network_name = self.__class__.__name__.lower() + + with tf.compat.v1.variable_scope(network_name): + for layer_name in weights_values: + with tf.compat.v1.variable_scope(layer_name, reuse=True): + for param_name, data in weights_values[layer_name].items(): + try: + var = tf.compat.v1.get_variable(param_name, use_resource=False) + self._session.run(var.assign(data)) + + except ValueError: + if not ignore_missing: + raise + + def feed(self, image): + """ + Feeds the network with an image + :param image: image (perhaps loaded with CV2) + :return: network result + """ + network_name = self.__class__.__name__.lower() + + with tf.compat.v1.variable_scope(network_name): + return self._feed(image) + + def _feed(self, image): + raise NotImplementedError("Method not implemented.") \ No newline at end of file diff --git a/photonix/classifiers/face/mtcnn/network/__init__.py b/photonix/classifiers/face/mtcnn/network/__init__.py new file mode 100755 index 00000000..368de3d0 --- /dev/null +++ b/photonix/classifiers/face/mtcnn/network/__init__.py @@ -0,0 +1,24 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# MIT License +# +# Copyright (c) 2019 Iván de Paz Centeno +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. diff --git a/photonix/classifiers/face/mtcnn/network/factory.py b/photonix/classifiers/face/mtcnn/network/factory.py new file mode 100755 index 00000000..0bfbbe96 --- /dev/null +++ b/photonix/classifiers/face/mtcnn/network/factory.py @@ -0,0 +1,131 @@ +#!/usr/bin/python3 +# -*- coding: utf-8 -*- + +# MIT License +# +# Copyright (c) 2019 Iván de Paz Centeno +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from tensorflow.keras.layers import Input, Dense, Conv2D, MaxPooling2D, PReLU, Flatten, Softmax +from tensorflow.keras.models import Model + +import numpy as np + + +class NetworkFactory: + + def build_pnet(self, input_shape=None): + if input_shape is None: + input_shape = (None, None, 3) + + p_inp = Input(input_shape) + + p_layer = Conv2D(10, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_inp) + p_layer = PReLU(shared_axes=[1, 2])(p_layer) + p_layer = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same")(p_layer) + + p_layer = Conv2D(16, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_layer) + p_layer = PReLU(shared_axes=[1, 2])(p_layer) + + p_layer = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding="valid")(p_layer) + p_layer = PReLU(shared_axes=[1, 2])(p_layer) + + p_layer_out1 = Conv2D(2, kernel_size=(1, 1), strides=(1, 1))(p_layer) + p_layer_out1 = Softmax(axis=3)(p_layer_out1) + + p_layer_out2 = Conv2D(4, kernel_size=(1, 1), strides=(1, 1))(p_layer) + + p_net = Model(p_inp, [p_layer_out2, p_layer_out1]) + + return p_net + + def build_rnet(self, input_shape=None): + if input_shape is None: + input_shape = (24, 24, 3) + + r_inp = Input(input_shape) + + r_layer = Conv2D(28, kernel_size=(3, 3), strides=(1, 1), padding="valid")(r_inp) + r_layer = PReLU(shared_axes=[1, 2])(r_layer) + r_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(r_layer) + + r_layer = Conv2D(48, kernel_size=(3, 3), strides=(1, 1), padding="valid")(r_layer) + r_layer = PReLU(shared_axes=[1, 2])(r_layer) + r_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(r_layer) + + r_layer = Conv2D(64, kernel_size=(2, 2), strides=(1, 1), padding="valid")(r_layer) + r_layer = PReLU(shared_axes=[1, 2])(r_layer) + r_layer = Flatten()(r_layer) + r_layer = Dense(128)(r_layer) + r_layer = 
PReLU()(r_layer) + + r_layer_out1 = Dense(2)(r_layer) + r_layer_out1 = Softmax(axis=1)(r_layer_out1) + + r_layer_out2 = Dense(4)(r_layer) + + r_net = Model(r_inp, [r_layer_out2, r_layer_out1]) + + return r_net + + def build_onet(self, input_shape=None): + if input_shape is None: + input_shape = (48, 48, 3) + + o_inp = Input(input_shape) + o_layer = Conv2D(32, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_inp) + o_layer = PReLU(shared_axes=[1, 2])(o_layer) + o_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="same")(o_layer) + + o_layer = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_layer) + o_layer = PReLU(shared_axes=[1, 2])(o_layer) + o_layer = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding="valid")(o_layer) + + o_layer = Conv2D(64, kernel_size=(3, 3), strides=(1, 1), padding="valid")(o_layer) + o_layer = PReLU(shared_axes=[1, 2])(o_layer) + o_layer = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="same")(o_layer) + + o_layer = Conv2D(128, kernel_size=(2, 2), strides=(1, 1), padding="valid")(o_layer) + o_layer = PReLU(shared_axes=[1, 2])(o_layer) + + o_layer = Flatten()(o_layer) + o_layer = Dense(256)(o_layer) + o_layer = PReLU()(o_layer) + + o_layer_out1 = Dense(2)(o_layer) + o_layer_out1 = Softmax(axis=1)(o_layer_out1) + o_layer_out2 = Dense(4)(o_layer) + o_layer_out3 = Dense(10)(o_layer) + + o_net = Model(o_inp, [o_layer_out2, o_layer_out3, o_layer_out1]) + return o_net + + def build_P_R_O_nets_from_file(self, weights_file): + weights = np.load(weights_file, allow_pickle=True).tolist() + + p_net = self.build_pnet() + r_net = self.build_rnet() + o_net = self.build_onet() + + p_net.set_weights(weights['pnet']) + r_net.set_weights(weights['rnet']) + o_net.set_weights(weights['onet']) + + return p_net, r_net, o_net diff --git a/photonix/classifiers/info.py b/photonix/classifiers/info.py index c14650d0..5f84908d 100644 --- a/photonix/classifiers/info.py +++ b/photonix/classifiers/info.py @@ -1,6 +1,8 @@ CLASSIFIERS = [ 'color', 'location', - 'object', + 'face', 'style', + 'object', + 'event', ] diff --git a/photonix/classifiers/location/model.py b/photonix/classifiers/location/model.py index cb78fee1..47359b4f 100644 --- a/photonix/classifiers/location/model.py +++ b/photonix/classifiers/location/model.py @@ -206,7 +206,6 @@ def run_on_photo(photo_id): photo, results = results_for_model_on_photo(model, photo_id) if photo and results['country']: - from django.utils import timezone from photonix.photos.models import PhotoTag photo.clear_tags(source='C', type='L') country_tag = get_or_create_tag(library=photo.library, name=results['country']['name'], type='L', source='C') @@ -214,9 +213,6 @@ def run_on_photo(photo_id): if results['city']: city_tag = get_or_create_tag(library=photo.library, name=results['city']['name'], type='L', source='C', parent=country_tag) PhotoTag(photo=photo, tag=city_tag, source='C', confidence=0.5, significance=0.5).save() - photo.classifier_color_completed_at = timezone.now() - photo.classifier_color_version = getattr(model, 'version', 0) - photo.save() return photo, results diff --git a/photonix/classifiers/object/model.py b/photonix/classifiers/object/model.py index af4c179b..15db6106 100644 --- a/photonix/classifiers/object/model.py +++ b/photonix/classifiers/object/model.py @@ -2,17 +2,18 @@ import sys from pathlib import Path +from django.utils import timezone import numpy as np from PIL import Image - -import redis from redis_lock import Lock import tensorflow as tf from 
photonix.classifiers.object.utils import label_map_util from photonix.classifiers.base_model import BaseModel +from photonix.photos.utils.redis import redis_connection +from photonix.photos.utils.metadata import PhotoMetadata + -r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1')) GRAPH_FILE = os.path.join('object', 'ssd_mobilenet_v2_oid_v4_2018_12_12_frozen_inference_graph.pb') LABEL_FILE = os.path.join('object', 'oid_v4_label_map.pbtxt') @@ -33,7 +34,7 @@ def __init__(self, model_dir=None, graph_file=GRAPH_FILE, label_file=LABEL_FILE, self.labels = self.load_labels(label_file) def load_graph(self, graph_file): - with Lock(r, 'classifier_{}_load_graph'.format(self.name)): + with Lock(redis_connection, 'classifier_{}_load_graph'.format(self.name)): if self.graph_cache_key in self.graph_cache: return self.graph_cache[self.graph_cache_key] @@ -115,6 +116,17 @@ def format_output(self, output_dict, min_score): def predict(self, image_file, min_score=0.1): image = Image.open(image_file) + + if image.mode != 'RGB': + image = image.convert('RGB') + + # Perform rotations if decalared in metadata + metadata = PhotoMetadata(image_file) + if metadata.get('Orientation') in ['Rotate 90 CW', 'Rotate 270 CCW']: + image = image.rotate(-90, expand=True) + elif metadata.get('Orientation') in ['Rotate 90 CCW', 'Rotate 270 CW']: + image = image.rotate(90, expand=True) + # the array based representation of the image will be used later in order to prepare the # result image with boxes and labels on it. image_np = self.load_image_into_numpy_array(image) @@ -132,12 +144,12 @@ def run_on_photo(photo_id): photo, results = results_for_model_on_photo(model, photo_id) if photo: - from django.utils import timezone from photonix.photos.models import PhotoTag photo.clear_tags(source='C', type='O') for result in results: - tag = get_or_create_tag(library=photo.library, name=result['label'], type='O', source='C') - PhotoTag(photo=photo, tag=tag, source='C', confidence=result['score'], significance=result['significance'], position_x=result['x'], position_y=result['y'], size_x=result['width'], size_y=result['height']).save() + if result['label'] != 'Human face': # We have a specialised face detector + tag = get_or_create_tag(library=photo.library, name=result['label'], type='O', source='C') + PhotoTag(photo=photo, tag=tag, source='C', confidence=result['score'], significance=result['significance'], position_x=result['x'], position_y=result['y'], size_x=result['width'], size_y=result['height']).save() photo.classifier_object_completed_at = timezone.now() photo.classifier_object_version = getattr(model, 'version', 0) photo.save() @@ -148,7 +160,7 @@ def run_on_photo(photo_id): if __name__ == '__main__': model = ObjectModel() if len(sys.argv) != 2: - print('Argument required: image file path') + print('Argument required: image file path or Photo ID') exit(1) results = run_on_photo(sys.argv[1]) diff --git a/photonix/classifiers/runners.py b/photonix/classifiers/runners.py index 49473514..d5fb1f3f 100644 --- a/photonix/classifiers/runners.py +++ b/photonix/classifiers/runners.py @@ -1,4 +1,6 @@ +import os import re +from time import sleep from uuid import UUID @@ -17,7 +19,7 @@ def get_or_create_tag(library, name, type, source, parent=None, ordering=None): return tag -def results_for_model_on_photo(model, photo_id): +def get_photo_by_any_type(photo_id, model=None): is_photo_instance = False photo = None @@ -31,13 +33,26 @@ def results_for_model_on_photo(model, photo_id): # Is an individual filename so return the 
prediction if not is_photo_instance: - return None, model.predict(photo_id) + return None # Is a Photo model instance so needs saving if not photo: + # Handle running scripts from command line and Photo IDs + if not os.environ.get('DJANGO_SETTINGS_MODULE'): + os.environ.setdefault("DJANGO_SETTINGS_MODULE", "photonix.web.settings") + import django + django.setup() + from photonix.photos.models import Photo photo = Photo.objects.get(id=photo_id) - results = model.predict(photo.base_image_path) + return is_photo_instance and photo or None - return is_photo_instance and photo or None, results + +def results_for_model_on_photo(model, photo_id): + photo = get_photo_by_any_type(photo_id, model) + if photo: + results = model.predict(photo.base_image_path) + else: + results = model.predict(photo_id) + return photo, results diff --git a/photonix/classifiers/style/model.py b/photonix/classifiers/style/model.py index 7aae2e97..097fdf97 100644 --- a/photonix/classifiers/style/model.py +++ b/photonix/classifiers/style/model.py @@ -4,11 +4,12 @@ import numpy as np -import redis -import tensorflow as tf from redis_lock import Lock +import tensorflow as tf from photonix.classifiers.base_model import BaseModel +from photonix.photos.utils.redis import redis_connection +from photonix.web.utils import logger GRAPH_FILE = os.path.join('style', 'graph.pb') @@ -34,8 +35,7 @@ def __init__(self, model_dir=None, graph_file=GRAPH_FILE, label_file=LABEL_FILE, self.labels = self.load_labels(label_file) def load_graph(self, graph_file): - r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1')) - with Lock(r, 'classifier_{}_load_graph'.format(self.name)): + with Lock(redis_connection, 'classifier_{}_load_graph'.format(self.name)): if self.graph_cache_key in self.graph_cache: return self.graph_cache[self.graph_cache_key] @@ -72,6 +72,10 @@ def predict(self, image_file, min_score=0.66): input_mean=input_mean, input_std=input_std) + if t is None: + logger.info(f'Skipping {image_file}, file format not supported by Tensorflow') + return None + input_name = "import/" + input_layer output_name = "import/" + output_layer input_operation = self.graph.get_operation_by_name(input_name) @@ -91,22 +95,25 @@ def predict(self, image_file, min_score=0.66): def read_tensor_from_image_file(self, file_name, input_height=299, input_width=299, input_mean=0, input_std=255): input_name = "file_reader" - - file_reader = tf.io.read_file(file_name, input_name) - if file_name.endswith(".png"): - image_reader = tf.image.decode_png(file_reader, channels=3, name='png_reader') - elif file_name.endswith(".gif"): - image_reader = tf.squeeze(tf.image.decode_gif(file_reader, name='gif_reader')) - elif file_name.endswith(".bmp"): - image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader') - else: - image_reader = tf.image.decode_jpeg(file_reader, channels=3, name='jpeg_reader') - float_caster = tf.cast(image_reader, tf.float32) - dims_expander = tf.expand_dims(float_caster, 0) - resized = tf.image.resize(dims_expander, [input_height, input_width], method=tf.image.ResizeMethod.BILINEAR, antialias=True) - normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std]) - sess = tf.compat.v1.Session() - return sess.run(normalized) + try: + + file_reader = tf.io.read_file(file_name, input_name) + if file_name.endswith(".png"): + image_reader = tf.image.decode_png(file_reader, channels=3, name='png_reader') + elif file_name.endswith(".gif"): + image_reader = tf.squeeze(tf.image.decode_gif(file_reader, name='gif_reader')) + elif 
file_name.endswith(".bmp"): + image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader') + else: + image_reader = tf.image.decode_jpeg(file_reader, channels=3, name='jpeg_reader') + float_caster = tf.cast(image_reader, tf.float32) + dims_expander = tf.expand_dims(float_caster, 0) + resized = tf.image.resize(dims_expander, [input_height, input_width], method=tf.image.ResizeMethod.BILINEAR, antialias=True) + normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std]) + sess = tf.compat.v1.Session() + return sess.run(normalized) + except: + return None def run_on_photo(photo_id): @@ -115,16 +122,12 @@ def run_on_photo(photo_id): from photonix.classifiers.runners import results_for_model_on_photo, get_or_create_tag photo, results = results_for_model_on_photo(model, photo_id) - if photo: - from django.utils import timezone + if photo and results is not None: from photonix.photos.models import PhotoTag photo.clear_tags(source='C', type='S') for name, score in results: tag = get_or_create_tag(library=photo.library, name=name, type='S', source='C') PhotoTag(photo=photo, tag=tag, source='C', confidence=score, significance=score).save() - photo.classifier_style_completed_at = timezone.now() - photo.classifier_style_version = getattr(model, 'version', 0) - photo.save() return photo, results @@ -137,5 +140,8 @@ def run_on_photo(photo_id): results = model.predict(sys.argv[1], min_score=0.01) - for label, score in results: - print('{} (score: {:0.5f})'.format(label, score)) + if results is None: + print(f'{sys.argv[1]} could not be processed by style classifier') + else: + for label, score in results: + print('{} (score: {:0.5f})'.format(label, score)) diff --git a/photonix/manage.py b/photonix/manage.py index 53daf0a7..68501c16 100755 --- a/photonix/manage.py +++ b/photonix/manage.py @@ -2,6 +2,7 @@ import os import sys + if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "photonix.web.settings") try: diff --git a/photonix/photos/admin.py b/photonix/photos/admin.py index 66c6f016..00a81b79 100644 --- a/photonix/photos/admin.py +++ b/photonix/photos/admin.py @@ -24,14 +24,14 @@ class LibraryPathInline(admin.TabularInline): class LibraryAdmin(VersionedAdmin): - list_display = ('name', 'classification_color_enabled', 'classification_location_enabled', 'classification_style_enabled', 'classification_object_enabled', 'setup_stage_completed', 'created_at', 'updated_at') + list_display = ('name', 'classification_color_enabled', 'classification_location_enabled', 'classification_style_enabled', 'classification_object_enabled', 'classification_face_enabled', 'setup_stage_completed', 'created_at', 'updated_at') list_ordering = ('name',) - list_filter = ('classification_color_enabled', 'classification_location_enabled', 'classification_style_enabled', 'classification_object_enabled', 'setup_stage_completed',) + list_filter = ('classification_color_enabled', 'classification_location_enabled', 'classification_style_enabled', 'classification_object_enabled', 'classification_face_enabled' ,'setup_stage_completed',) inlines = [LibraryUserInline, LibraryPathInline] fieldsets = ( (None, { - 'fields': ('name', 'classification_color_enabled', 'classification_location_enabled', 'classification_style_enabled', 'classification_object_enabled', 'setup_stage_completed'), + 'fields': ('name', 'classification_color_enabled', 'classification_location_enabled', 'classification_style_enabled', 'classification_object_enabled', 'classification_face_enabled','setup_stage_completed'), }), ) + 
VersionedAdmin.fieldsets diff --git a/photonix/photos/management/commands/classification_color_processor.py b/photonix/photos/management/commands/classification_color_processor.py index eca7419b..b5d23308 100644 --- a/photonix/photos/management/commands/classification_color_processor.py +++ b/photonix/photos/management/commands/classification_color_processor.py @@ -1,11 +1,12 @@ from django.core.management.base import BaseCommand + # Pre-load the model graphs so it doesn't have to be done for each job from photonix.classifiers.color import ColorModel, run_on_photo -from photonix.photos.models import Task from photonix.photos.utils.classification import ThreadedQueueProcessor +from photonix.web.utils import logger -print('Loading color model') +logger.debug('Loading color model') model = ColorModel() diff --git a/photonix/photos/management/commands/classification_event_processor.py b/photonix/photos/management/commands/classification_event_processor.py new file mode 100644 index 00000000..b9493868 --- /dev/null +++ b/photonix/photos/management/commands/classification_event_processor.py @@ -0,0 +1,23 @@ +from django.core.management.base import BaseCommand + +# Pre-load the model graphs so it doesn't have to be done for each job +from photonix.classifiers.event import EventModel, run_on_photo +from photonix.photos.utils.classification import ThreadedQueueProcessor +from photonix.web.utils import logger + + +logger.debug('Loading event model') +model = EventModel() + + +class Command(BaseCommand): + help = 'Runs the workers with the event classification model.' + + def run_processors(self): + num_workers = 1 + batch_size = 64 + threaded_queue_processor = ThreadedQueueProcessor(model, 'classify.event', run_on_photo, num_workers, batch_size) + threaded_queue_processor.run() + + def handle(self, *args, **options): + self.run_processors() diff --git a/photonix/photos/management/commands/classification_face_processor.py b/photonix/photos/management/commands/classification_face_processor.py new file mode 100644 index 00000000..7f1a6dbb --- /dev/null +++ b/photonix/photos/management/commands/classification_face_processor.py @@ -0,0 +1,22 @@ +from django.core.management.base import BaseCommand + +# Pre-load the model graphs so it doesn't have to be done for each job +from photonix.classifiers.face import run_on_photo +from photonix.photos.utils.classification import ThreadedQueueProcessor +from photonix.web.utils import logger + + +model = None + + +class Command(BaseCommand): + help = 'Runs the workers with the face detection and recognition model.' 
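+
+    # Unlike the other classification processors, no model graph is pre-loaded
+    # here (model is None above); the face model is library-specific (see
+    # retrain_face_similarity_index, which builds a FaceModel per Library), so
+    # it is presumably instantiated on demand inside run_on_photo.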
+ + def run_processors(self): + num_workers = 1 + batch_size = 64 + threaded_queue_processor = ThreadedQueueProcessor(model, 'classify.face', run_on_photo, num_workers, batch_size) + threaded_queue_processor.run() + + def handle(self, *args, **options): + self.run_processors() diff --git a/photonix/photos/management/commands/classification_location_processor.py b/photonix/photos/management/commands/classification_location_processor.py index 7a31c8c7..495cb3df 100644 --- a/photonix/photos/management/commands/classification_location_processor.py +++ b/photonix/photos/management/commands/classification_location_processor.py @@ -1,11 +1,12 @@ from django.core.management.base import BaseCommand + # Pre-load the model graphs so it doesn't have to be done for each job from photonix.classifiers.location import LocationModel, run_on_photo -from photonix.photos.models import Task from photonix.photos.utils.classification import ThreadedQueueProcessor +from photonix.web.utils import logger -print('Loading location model') +logger.debug('Loading location model') model = LocationModel() diff --git a/photonix/photos/management/commands/classification_object_processor.py b/photonix/photos/management/commands/classification_object_processor.py index 6c813de0..b89e2eec 100644 --- a/photonix/photos/management/commands/classification_object_processor.py +++ b/photonix/photos/management/commands/classification_object_processor.py @@ -1,11 +1,12 @@ from django.core.management.base import BaseCommand + # Pre-load the model graphs so it doesn't have to be done for each job from photonix.classifiers.object import ObjectModel, run_on_photo -from photonix.photos.models import Task from photonix.photos.utils.classification import ThreadedQueueProcessor +from photonix.web.utils import logger -print('Loading object classification model') +logger.debug('Loading object classification model') model = ObjectModel() diff --git a/photonix/photos/management/commands/classification_scheduler.py b/photonix/photos/management/commands/classification_scheduler.py index e840785f..d5aacb14 100644 --- a/photonix/photos/management/commands/classification_scheduler.py +++ b/photonix/photos/management/commands/classification_scheduler.py @@ -4,6 +4,7 @@ from photonix.photos.models import Task from photonix.photos.utils.classification import process_classify_images_tasks +from photonix.web.utils import logger class Command(BaseCommand): @@ -13,7 +14,7 @@ def run_scheduler(self): while True: num_remaining = Task.objects.filter(type='classify_images', status='P').count() if num_remaining: - print('{} photos remaining for classification'.format(num_remaining)) + logger.info('{} photos remaining for classification'.format(num_remaining)) process_classify_images_tasks() sleep(1) diff --git a/photonix/photos/management/commands/classification_style_processor.py b/photonix/photos/management/commands/classification_style_processor.py index af837d82..5e379819 100644 --- a/photonix/photos/management/commands/classification_style_processor.py +++ b/photonix/photos/management/commands/classification_style_processor.py @@ -1,11 +1,12 @@ from django.core.management.base import BaseCommand + # Pre-load the model graphs so it doesn't have to be done for each job from photonix.classifiers.style import StyleModel, run_on_photo -from photonix.photos.models import Task from photonix.photos.utils.classification import ThreadedQueueProcessor +from photonix.web.utils import logger -print('Loading style classification model') +logger.debug('Loading style 
classification model') model = StyleModel() diff --git a/photonix/photos/management/commands/create_library.py b/photonix/photos/management/commands/create_library.py index 80373c0d..8f4eaddd 100644 --- a/photonix/photos/management/commands/create_library.py +++ b/photonix/photos/management/commands/create_library.py @@ -1,4 +1,5 @@ import os +import argparse from pathlib import Path from django.contrib.auth import get_user_model @@ -16,7 +17,7 @@ class Command(BaseCommand): help = 'Create a library for a user' - def create_library(self, username, library_name): + def create_library(self, username, library_name, path): # Get user user = User.objects.get(username=username) # Create Library @@ -27,8 +28,7 @@ def create_library(self, username, library_name): library=library, type='St', backend_type='Lo', - path='/data/photos/', - url='/photos/', + path=path, ) library_user, _ = LibraryUser.objects.get_or_create( library=library, @@ -36,12 +36,19 @@ def create_library(self, username, library_name): owner=True, ) - print(f'Library "{library_name}" created successfully for user "{username}"') + print(f'Library "{library_name}" with path "{path}" created successfully for user "{username}"') + + def is_path_dir(self, path): + if os.path.isdir(path): + return path + else: + raise argparse.ArgumentTypeError(f"{path} is not a valid folder") def add_arguments(self, parser): # Positional arguments - parser.add_argument('username', nargs='+', type=str) - parser.add_argument('library_name', nargs='+', type=str) + parser.add_argument('username', type=str) + parser.add_argument('library_name', type=str) + parser.add_argument('--path', type=self.is_path_dir, default='/data/photos') def handle(self, *args, **options): - self.create_library(options['username'][0], options['library_name'][0]) + self.create_library(options['username'], options['library_name'], options['path']) diff --git a/photonix/photos/management/commands/create_user.py b/photonix/photos/management/commands/create_user.py new file mode 100644 index 00000000..a6d068fa --- /dev/null +++ b/photonix/photos/management/commands/create_user.py @@ -0,0 +1,84 @@ +from getpass import getpass +import sys + +from django.contrib.auth import get_user_model +from django.core.management.base import BaseCommand + +from photonix.photos.models import Library, LibraryUser + + +User = get_user_model() + + +class Command(BaseCommand): + '''Management command to create user and assign them to libararies.''' + + help = 'Assign library to user' + + def create_user(self, username, password): + '''To create user and assign to libraries.''' + if not username: + username = input('\nPlease enter username: ') + if User.objects.filter(username=username).exists(): + print(f'User "{username}" already exists') + self.show_libraries_list(User.objects.get(username=username)) + else: + self.validate_password(username, password) + + def show_libraries_list(self, user): + '''Method to show library list.''' + print('\nCurrent libraries:\n ') + lib_num_obj_pair_list = [] + lib_sequence_list = [] + for count, lib_obj in enumerate(Library.objects.all(), start=1): + print(f' {count}) {lib_obj.name}') + lib_num_obj_pair_list.append((count, lib_obj)) + lib_sequence_list.append(count) + self.assign_user_to_library(lib_num_obj_pair_list, user, lib_sequence_list) + + def validate_password(self, username, password=None): + '''Method to validate the password.''' + if not password: + password = getpass('Please enter password (hidden): ') + if len(password) >= 8: + user = 
User.objects.create(username=username) + user.set_password(password) + user.save() + print(f'\nUser created with name "{username}"') + self.show_libraries_list(user) + else: + print('Password must be at least 8 characters long!') + self.validate_password(username) + + def assign_user_to_library(self, lib_num_obj_pair_list, user, lib_sequence_list): + '''Method to assign user to selected libarary.''' + entered_lib_num = input('\nPlease enter the number of a library you want the user to be able to access: ') + if not (entered_lib_num.isdigit() and int(entered_lib_num) in lib_sequence_list): + print('You have entered invalid library number.') + self.assign_user_to_library(lib_num_obj_pair_list, user, lib_sequence_list) + for sequence_number, obj in lib_num_obj_pair_list: + if int(entered_lib_num) == sequence_number: + LibraryUser.objects.get_or_create(library=obj, user=user, owner=True) + print(f'\nUser "{user.username}" assigned to library "{obj.name}"\n') + self.continue_the_process(lib_num_obj_pair_list, user, lib_sequence_list) + + def continue_the_process(self, lib_num_obj_pair_list, user, lib_sequence_list): + '''Method to continue the process if user wants to allocate user object to another libraries.''' + continue_or_not = input('Do you want to add user to another library? Enter Y or N: ') + if continue_or_not.upper() == 'Y': + self.assign_user_to_library(lib_num_obj_pair_list, user, lib_sequence_list) + elif continue_or_not.upper() == 'N': + sys.exit() # we can also write here 'pass' but to avoid unnecessary loop running we used exit() + else: + print('Please enter only Y or N') + self.continue_the_process(lib_num_obj_pair_list, user, lib_sequence_list) + + def add_arguments(self, parser): + '''To pass argumentes in management command.''' + # Optional or named arguments + parser.add_argument('--username', type=str, help='Take username') + parser.add_argument('--password', type=str, help='Take password') + + def handle(self, *args, **options): + '''Method in which we call management command with passed arguments.''' + self.create_user(options.get('username'), options.get('password')) diff --git a/photonix/photos/management/commands/delete_all_photos.py b/photonix/photos/management/commands/delete_all_photos.py index b845a74b..1a4da215 100644 --- a/photonix/photos/management/commands/delete_all_photos.py +++ b/photonix/photos/management/commands/delete_all_photos.py @@ -5,6 +5,7 @@ from django.core.management.base import BaseCommand from photonix.photos.models import Camera, Lens, Photo, PhotoFile, Tag +from photonix.web.utils import logger class Command(BaseCommand): @@ -19,7 +20,7 @@ def clear_dir(self, path): elif os.path.isdir(file_path): shutil.rmtree(file_path) except Exception as e: - print(e) + logger.error(e) def delete_all_photos(self): Camera.objects.all().delete() diff --git a/photonix/photos/management/commands/housekeeping.py b/photonix/photos/management/commands/housekeeping.py index 3db8c916..5ec98606 100644 --- a/photonix/photos/management/commands/housekeeping.py +++ b/photonix/photos/management/commands/housekeeping.py @@ -8,6 +8,7 @@ from photonix.photos.models import Photo, Task from photonix.photos.utils.thumbnails import THUMBNAILER_VERSION +from photonix.web.utils import logger class Command(BaseCommand): @@ -15,16 +16,19 @@ class Command(BaseCommand): def housekeeping(self): # Remove old cache directories - for directory in os.listdir(settings.THUMBNAIL_ROOT): - if directory not in ['photofile']: - path = Path(settings.THUMBNAIL_ROOT) / directory - 
print(f'Removing old cache directory {path}') - rmtree(path) + try: + for directory in os.listdir(settings.THUMBNAIL_ROOT): + if directory not in ['photofile']: + path = Path(settings.THUMBNAIL_ROOT) / directory + logger.info(f'Removing old cache directory {path}') + rmtree(path) + except FileNotFoundError: # In case thumbnail dir hasn't been created yet + pass # Regenerate any outdated thumbnails photos = Photo.objects.filter(thumbnailed_version__lt=THUMBNAILER_VERSION) if photos.count(): - print(f'Rescheduling {photos.count()} photos to have their thumbnails regenerated') + logger.info(f'Rescheduling {photos.count()} photos to have their thumbnails regenerated') for photo in photos: Task( type='generate_thumbnails', subject_id=photo.id, diff --git a/photonix/photos/management/commands/import_demo_photos.py b/photonix/photos/management/commands/import_demo_photos.py index 96157af2..82326026 100644 --- a/photonix/photos/management/commands/import_demo_photos.py +++ b/photonix/photos/management/commands/import_demo_photos.py @@ -8,6 +8,7 @@ from photonix.photos.models import Library, LibraryPath, LibraryUser from photonix.photos.utils.db import record_photo from photonix.photos.utils.fs import determine_destination, download_file +from photonix.web.utils import logger User = get_user_model() @@ -42,14 +43,26 @@ def import_photos(self): user.save() except IntegrityError: user = User.objects.get(username='demo') + # Create Library - library, _ = Library.objects.get_or_create( - name='Demo Library', - # base_thumbnail_path='/data/cache/thumbnails/', - # base_thumbnail_url='/thumbnails/' - ) + try: + library = Library.objects.get( + name='Demo Library', + ) + except Library.DoesNotExist: + library = Library( + name='Demo Library', + classification_color_enabled=True, + classification_location_enabled=True, + classification_style_enabled=True, + classification_object_enabled=True, + classification_face_enabled=True, + setup_stage_completed='Th' + ) + library.save() + # LibraryPath as locally mounted volume - library_path, _ = LibraryPath.objects.get_or_create( + LibraryPath.objects.get_or_create( library=library, type='St', backend_type='Lo', @@ -61,7 +74,7 @@ def import_photos(self): # In dev environment user needs to be owner to access all functionality # but demo.photonix.org this could lead to the system being messed up owner = os.environ.get('ENV') == 'dev' - library_user, _ = LibraryUser.objects.get_or_create( + LibraryUser.objects.get_or_create( library=library, user=user, owner=owner @@ -74,7 +87,7 @@ def import_photos(self): dest_path = str(Path(dest_dir) / fn) if not os.path.exists(dest_path): - print('Fetching {} -> {}'.format(url, dest_path)) + logger.info('Fetching {} -> {}'.format(url, dest_path)) download_file(url, dest_path) record_photo(dest_path, library) diff --git a/photonix/photos/management/commands/import_photos.py b/photonix/photos/management/commands/import_photos.py index 0c7e5111..71520fbd 100644 --- a/photonix/photos/management/commands/import_photos.py +++ b/photonix/photos/management/commands/import_photos.py @@ -2,6 +2,7 @@ from photonix.photos.utils.organise import import_photos_from_dir from photonix.photos.utils.system import missing_system_dependencies +from photonix.web.utils import logger class Command(BaseCommand): @@ -13,7 +14,7 @@ def add_arguments(self, parser): def import_photos(self, paths): missing = missing_system_dependencies(['exiftool', ]) if missing: - print('Missing dependencies: {}'.format(missing)) + logger.critical('Missing dependencies: 
{}'.format(missing)) exit(1) for path in paths: diff --git a/photonix/photos/management/commands/raw_processor.py b/photonix/photos/management/commands/raw_processor.py index 029bf696..16ff4f13 100644 --- a/photonix/photos/management/commands/raw_processor.py +++ b/photonix/photos/management/commands/raw_processor.py @@ -8,6 +8,8 @@ from photonix.photos.models import Task from photonix.photos.utils.raw import process_raw_task from photonix.photos.utils.tasks import requeue_stuck_tasks +from photonix.web.utils import logger + q = queue.Queue() @@ -28,10 +30,10 @@ class Command(BaseCommand): help = 'Processes raw photos into a JPEG we can use elsewhere.' def run_processors(self): - num_workers = cpu_count() + num_workers = max(int(cpu_count() / 4), 1) threads = [] - print('Starting {} raw processor workers\n'.format(num_workers)) + logger.info(f'Starting {num_workers} raw processor workers') for i in range(num_workers): t = threading.Thread(target=worker) @@ -44,12 +46,12 @@ def run_processors(self): num_remaining = Task.objects.filter(type='process_raw', status='P').count() if num_remaining: - print('{} tasks remaining for raw processing'.format(num_remaining)) + logger.info(f'{num_remaining} tasks remaining for raw processing') # Load 'Pending' tasks onto worker threads for task in Task.objects.filter(type='process_raw', status='P')[:64]: q.put(task) - print('Finished raw processing batch') + logger.info('Finished raw processing batch') # Wait until all threads have finished q.join() diff --git a/photonix/photos/management/commands/raw_scheduler.py b/photonix/photos/management/commands/raw_scheduler.py index d8b0db8e..9c43601f 100644 --- a/photonix/photos/management/commands/raw_scheduler.py +++ b/photonix/photos/management/commands/raw_scheduler.py @@ -4,6 +4,7 @@ from photonix.photos.models import Task from photonix.photos.utils.raw import ensure_raw_processing_tasks +from photonix.web.utils import logger class Command(BaseCommand): @@ -13,9 +14,9 @@ def run_scheduler(self): while True: num_remaining = Task.objects.filter(type='ensure_raw_processed', status='P').count() if num_remaining: - print('{} tasks remaining for raw process scheduling'.format(num_remaining)) + logger.info(f'{num_remaining} tasks remaining for raw process scheduling') ensure_raw_processing_tasks() - print('Finished raw process scheduling') + logger.info('Finished raw process scheduling') sleep(1) def handle(self, *args, **options): diff --git a/photonix/photos/management/commands/rescan_photos.py b/photonix/photos/management/commands/rescan_photos.py index 99ac025c..7a125994 100644 --- a/photonix/photos/management/commands/rescan_photos.py +++ b/photonix/photos/management/commands/rescan_photos.py @@ -1,9 +1,11 @@ from django.conf import settings from django.core.management.base import BaseCommand +from redis_lock import Lock +from photonix.photos.utils.redis import redis_connection from photonix.photos.utils.organise import rescan_photo_libraries from photonix.photos.utils.system import missing_system_dependencies -# from web.utils import notify_ui +from photonix.web.utils import logger class Command(BaseCommand): @@ -15,13 +17,12 @@ def add_arguments(self, parser): def rescan_photos(self, paths): missing = missing_system_dependencies(['exiftool', ]) if missing: - print('Missing dependencies: {}'.format(missing)) + logger.critical(f'Missing dependencies: {missing}') exit(1) rescan_photo_libraries(paths) - print('Completed') + logger.info('Rescan complete') def handle(self, *args, **options): - # 
notify_ui('photo_dirs_scanning', True) - self.rescan_photos(options['paths']) - # notify_ui('photo_dirs_scanning', False) + with Lock(redis_connection, 'rescan_photos'): + self.rescan_photos(options['paths']) diff --git a/photonix/photos/management/commands/rescan_photos_periodically.py b/photonix/photos/management/commands/rescan_photos_periodically.py index 2ac184c9..0e19478a 100644 --- a/photonix/photos/management/commands/rescan_photos_periodically.py +++ b/photonix/photos/management/commands/rescan_photos_periodically.py @@ -1,9 +1,13 @@ from time import sleep + from django.conf import settings from django.core.management.base import BaseCommand +from redis_lock import Lock from photonix.photos.utils.organise import rescan_photo_libraries from photonix.photos.utils.system import missing_system_dependencies +from photonix.photos.utils.redis import redis_connection +from photonix.web.utils import logger class Command(BaseCommand): @@ -15,18 +19,17 @@ def add_arguments(self, parser): def rescan_photos(self, paths): missing = missing_system_dependencies(['exiftool', ]) if missing: - print('Missing dependencies: {}'.format(missing)) + logger.critical(f'Missing dependencies: {missing}') exit(1) rescan_photo_libraries(paths) - print('Rescan complete') + logger.info('Rescan complete') def handle(self, *args, **options): try: while True: - # TODO: Add a lock in here because DB corruption occurs if rescan_photos is called while it's still already running - self.rescan_photos(options['paths']) - - sleep(60 * 15) # Sleep for an hour + with Lock(redis_connection, 'rescan_photos'): + self.rescan_photos(options['paths']) + sleep(60 * 60) # Sleep for an hour except KeyboardInterrupt: pass \ No newline at end of file diff --git a/photonix/photos/management/commands/reset_redis_locks.py b/photonix/photos/management/commands/reset_redis_locks.py index c119d316..970a44f0 100644 --- a/photonix/photos/management/commands/reset_redis_locks.py +++ b/photonix/photos/management/commands/reset_redis_locks.py @@ -1,16 +1,11 @@ -import os - -import redis_lock - from django.core.management.base import BaseCommand -import redis - +import redis_lock -r = redis.Redis(host=os.environ.get('REDIS_HOST', '127.0.0.1')) +from photonix.photos.utils.redis import redis_connection class Command(BaseCommand): help = 'Removes all Redis locks - intended to be run on server start.' def handle(self, *args, **options): - redis_lock.reset_all(r) + redis_lock.reset_all(redis_connection) diff --git a/photonix/photos/management/commands/retrain_face_similarity_index.py b/photonix/photos/management/commands/retrain_face_similarity_index.py new file mode 100644 index 00000000..8b5a9914 --- /dev/null +++ b/photonix/photos/management/commands/retrain_face_similarity_index.py @@ -0,0 +1,44 @@ +from datetime import datetime +import json +import os +from pathlib import Path +from time import time + +from django.conf import settings +from django.core.management.base import BaseCommand +from django.utils import timezone + +from photonix.photos.models import Library, PhotoTag +from photonix.classifiers.face.model import FaceModel +from photonix.web.utils import logger + + +class Command(BaseCommand): + help = 'Creates Approximate Nearest Neighbour (ANN) search index for quickly finding closest face without having to compare one-by-one.' 
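+
+    # A per-library version file ({library_id}_retrained_version.txt, timestamped
+    # YYYYMMDDHHMMSS) records when the ANN index was last rebuilt; the rebuild is
+    # skipped when a library has no Face PhotoTags, or none updated since that
+    # timestamp.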
+ + def retrain_face_similarity_index(self): + for library in Library.objects.all(): + version_file = Path(settings.MODEL_DIR) / 'face' / f'{library.id}_retrained_version.txt' + version_date = None + + if os.path.exists(version_file): + with open(version_file) as f: + contents = f.read().strip() + version_date = datetime.strptime(contents, '%Y%m%d%H%M%S').replace(tzinfo=timezone.utc) + + start = time() + logger.info(f'Updating ANN index for Library {library.id}') + + if PhotoTag.objects.filter(tag__type='F').count() == 0: + logger.info(' No Face PhotoTags in Library so no point in creating face ANN index yet') + return + if version_date and PhotoTag.objects.filter(updated_at__gt=version_date, tag__type='F').count() == 0: + logger.info(' No new Face PhotoTags in Library so no point in updating face ANN index') + return + + FaceModel(library_id=library.id).retrain_face_similarity_index() + + logger.info(f' Completed in {(time() - start):.3f}s') + + def handle(self, *args, **options): + self.retrain_face_similarity_index() diff --git a/photonix/photos/management/commands/thumbnail_processor.py b/photonix/photos/management/commands/thumbnail_processor.py index c2eb3d92..e20be367 100644 --- a/photonix/photos/management/commands/thumbnail_processor.py +++ b/photonix/photos/management/commands/thumbnail_processor.py @@ -8,6 +8,8 @@ from photonix.photos.models import Task from photonix.photos.utils.tasks import requeue_stuck_tasks from photonix.photos.utils.thumbnails import generate_thumbnails_for_photo +from photonix.web.utils import logger + q = queue.Queue() @@ -28,10 +30,10 @@ class Command(BaseCommand): help = 'Processes full-sized photos into thumbnails of various sizes.' def run_processors(self): - num_workers = cpu_count() + num_workers = max(int(cpu_count() / 4), 1) threads = [] - print('Starting {} thumbnail processor workers\n'.format(num_workers)) + logger.info('Starting {} thumbnail processor workers'.format(num_workers)) for i in range(num_workers): t = threading.Thread(target=worker) @@ -44,12 +46,12 @@ def run_processors(self): num_remaining = Task.objects.filter(type='generate_thumbnails', status='P').count() if num_remaining: - print('{} tasks remaining for thumbnail processing'.format(num_remaining)) + logger.info('{} tasks remaining for thumbnail processing'.format(num_remaining)) # Load 'Pending' tasks onto worker threads for task in Task.objects.filter(type='generate_thumbnails', status='P')[:64]: q.put(task) - print('Finished thumbnail processing batch') + logger.info('Finished thumbnail processing batch') # Wait until all threads have finished q.join() diff --git a/photonix/photos/management/commands/watch_photos.py b/photonix/photos/management/commands/watch_photos.py index be583877..fea2e43b 100644 --- a/photonix/photos/management/commands/watch_photos.py +++ b/photonix/photos/management/commands/watch_photos.py @@ -1,6 +1,4 @@ import asyncio -import imghdr -import subprocess from pathlib import Path from time import sleep @@ -12,6 +10,7 @@ from photonix.photos.utils.db import record_photo, move_or_rename_photo, delete_child_dir_all_photos from photonix.photos.models import LibraryPath +from photonix.web.utils import logger class Command(BaseCommand): @@ -67,13 +66,13 @@ async def check_libraries(): for path, id in current_libraries.items(): if path not in watching_libraries: for directory in get_directories_recursive(Path(path)): - print('Watching new path:', directory) + logger.info(f'Watching new path: {directory}') watch = inotify.add_watch(directory, Mask.MODIFY | 
Mask.CREATE | Mask.DELETE | Mask.CLOSE | Mask.MOVE) watching_libraries[path] = (id, watch) for path, (id, watch) in watching_libraries.items(): if path not in current_libraries: - print('Removing old path:', path) + logger.info(f'Removing old path: {path}') inotify.rm_watch(watch) await asyncio.sleep(4) @@ -88,16 +87,16 @@ async def handle_inotify_events(): photo_moved_from_cookie = moved_from_attr_dict.get('moved_from_cookie') moved_from_attr_dict = {} if event.mask.name == 'MOVED_TO' and photo_moved_from_cookie == event.cookie: - print(f'Moving or renaming the photo "{str(event.path)}" from library "{library_id}"') + logger.info(f'Moving or renaming the photo "{str(event.path)}" from library "{library_id}"') await move_or_rename_photo_async(photo_moved_from_path, event.path, library_id) else: - print(f'Removing photo "{str(photo_moved_from_path)}" from library "{library_id}"') + logger.info(f'Removing photo "{str(photo_moved_from_path)}" from library "{library_id}"') await record_photo_async(photo_moved_from_path, library_id, 'MOVED_FROM') elif Mask.CREATE in event.mask and event.path is not None and event.path.is_dir(): current_libraries = await get_libraries() for path, id in current_libraries.items(): for directory in get_directories_recursive(event.path): - print('Watching newly created child directory:', directory) + logger.info(f'Watching newly created child directory: {directory}') watch = inotify.add_watch(directory, Mask.MODIFY | Mask.CREATE | Mask.DELETE | Mask.CLOSE | Mask.MOVE) watching_libraries[path] = (id, watch) @@ -113,15 +112,14 @@ async def handle_inotify_events(): 'moved_from_path': event.path, 'moved_from_cookie': event.cookie} else: - print(f'Removing photo "{photo_path}" from library "{library_id}"') + logger.info(f'Removing photo "{photo_path}" from library "{library_id}"') await record_photo_async(photo_path, library_id, event.mask.name) elif event.mask.value == 1073741888: - print(f'Delete child directory with its all photos "{photo_path}" to library "{library_id}"') + logger.info(f'Delete child directory with its all photos "{photo_path}" to library "{library_id}"') await delete_child_dir_all_photos_async(photo_path, library_id) else: - if imghdr.what(photo_path) or not subprocess.run(['dcraw', '-i', photo_path]).returncode: - print(f'Adding photo "{photo_path}" to library "{library_id}"') - await record_photo_async(photo_path, library_id, event.mask.name) + logger.info(f'Adding photo "{photo_path}" to library "{library_id}"') + await record_photo_async(photo_path, library_id, event.mask.name) loop = asyncio.get_event_loop() loop.create_task(check_libraries()) @@ -130,7 +128,7 @@ async def handle_inotify_events(): try: loop.run_forever() except KeyboardInterrupt: - print('Shutting down') + logger.info('Shutting down') finally: loop.run_until_complete(loop.shutdown_asyncgens()) loop.close() diff --git a/photonix/photos/migrations/0008_auto_20210604_1842.py b/photonix/photos/migrations/0008_auto_20210604_1842.py new file mode 100644 index 00000000..a93170cc --- /dev/null +++ b/photonix/photos/migrations/0008_auto_20210604_1842.py @@ -0,0 +1,48 @@ +# Generated by Django 3.0.14 on 2021-06-04 18:42 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('photos', '0007_add_library_ForeignKey'), + ] + + operations = [ + migrations.AlterField( + model_name='photo', + name='drive_mode', + field=models.CharField(max_length=64, null=True), + ), + migrations.AlterField( + model_name='photo', + name='metering_mode', + 
field=models.CharField(max_length=64, null=True), + ), + migrations.AlterField( + model_name='photo', + name='shooting_mode', + field=models.CharField(max_length=64, null=True), + ), + migrations.AlterField( + model_name='phototag', + name='source', + field=models.CharField(choices=[('H', 'Human'), ('C', 'Computer')], db_index=True, max_length=1), + ), + migrations.AlterField( + model_name='tag', + name='source', + field=models.CharField(choices=[('H', 'Human'), ('C', 'Computer')], db_index=True, max_length=1), + ), + migrations.AlterField( + model_name='tag', + name='type', + field=models.CharField(choices=[('L', 'Location'), ('O', 'Object'), ('F', 'Face'), ('C', 'Color'), ('S', 'Style'), ('G', 'Generic'), ('E', 'Event')], db_index=True, max_length=1, null=True), + ), + migrations.AlterField( + model_name='task', + name='status', + field=models.CharField(choices=[('P', 'Pending'), ('S', 'Started'), ('C', 'Completed'), ('F', 'Failed')], db_index=True, default='P', max_length=1), + ), + ] diff --git a/photonix/photos/migrations/0009_auto_20210617_2218.py b/photonix/photos/migrations/0009_auto_20210617_2218.py new file mode 100644 index 00000000..9a1db00c --- /dev/null +++ b/photonix/photos/migrations/0009_auto_20210617_2218.py @@ -0,0 +1,43 @@ +# Generated by Django 3.2.3 on 2021-06-17 22:18 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('photos', '0008_auto_20210604_1842'), + ] + + operations = [ + migrations.AddField( + model_name='library', + name='classification_face_enabled', + field=models.BooleanField(default=False, help_text='Run face detection on photos?'), + ), + migrations.AddField( + model_name='phototag', + name='deleted', + field=models.BooleanField(default=False), + ), + migrations.AddField( + model_name='phototag', + name='extra_data', + field=models.TextField(null=True), + ), + migrations.AddField( + model_name='phototag', + name='retrained_model_version', + field=models.PositiveBigIntegerField(default=0, help_text='If classifier has models that are re-trained locally (e.g. 
Face) then we want to store this too (YYYYMMDDHHMMSS)'), + ), + migrations.AlterField( + model_name='photo', + name='flash', + field=models.BooleanField(null=True), + ), + migrations.AlterField( + model_name='phototag', + name='model_version', + field=models.PositiveIntegerField(default=0, help_text='Version number of classifier model if source is Computer (YYYYMMDD)'), + ), + ] diff --git a/photonix/photos/migrations/0010_auto_20210710_0959.py b/photonix/photos/migrations/0010_auto_20210710_0959.py new file mode 100644 index 00000000..feb380e5 --- /dev/null +++ b/photonix/photos/migrations/0010_auto_20210710_0959.py @@ -0,0 +1,23 @@ +# Generated by Django 3.2.3 on 2021-07-10 09:59 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('photos', '0009_auto_20210617_2218'), + ] + + operations = [ + migrations.AlterField( + model_name='photofile', + name='raw_external_params', + field=models.CharField(blank=True, max_length=32, null=True), + ), + migrations.AlterField( + model_name='photofile', + name='raw_external_version', + field=models.CharField(blank=True, max_length=32, null=True), + ), + ] diff --git a/photonix/photos/models.py b/photonix/photos/models.py index f3f66296..ff141db3 100644 --- a/photonix/photos/models.py +++ b/photonix/photos/models.py @@ -7,6 +7,7 @@ from django.utils import timezone from photonix.common.models import UUIDModel, VersionedModel +from photonix.web.utils import logger User = get_user_model() @@ -25,6 +26,7 @@ class Library(UUIDModel, VersionedModel): classification_location_enabled = models.BooleanField(default=False, help_text='Run location detection on photos?') classification_style_enabled = models.BooleanField(default=False, help_text='Run style classification on photos?') classification_object_enabled = models.BooleanField(default=False, help_text='Run object detection on photos?') + classification_face_enabled = models.BooleanField(default=False, help_text='Run face detection on photos?') setup_stage_completed = models.CharField(max_length=2, choices=LIBRARY_SETUP_STAGE_COMPLETED_CHOICES, blank=True, null=True, help_text='Where the user got to during onboarding setup') class Meta: @@ -37,6 +39,9 @@ def rescan(self): for library_path in self.paths: library_path.rescan() + def get_library_path_store(self): + return self.paths.filter(type='St')[0] + LIBRARY_PATH_TYPE_CHOICES = ( ('St', 'Store'), @@ -118,10 +123,10 @@ class Photo(UUIDModel, VersionedModel): exposure = models.CharField(max_length=8, blank=True, null=True) iso_speed = models.PositiveIntegerField(null=True) focal_length = models.DecimalField(max_digits=4, decimal_places=1, null=True) - flash = models.NullBooleanField() - metering_mode = models.CharField(max_length=32, null=True) - drive_mode = models.CharField(max_length=32, null=True) - shooting_mode = models.CharField(max_length=32, null=True) + flash = models.BooleanField(null=True) + metering_mode = models.CharField(max_length=64, null=True) + drive_mode = models.CharField(max_length=64, null=True) + shooting_mode = models.CharField(max_length=64, null=True) camera = models.ForeignKey(Camera, related_name='photos', null=True, on_delete=models.CASCADE) lens = models.ForeignKey(Lens, related_name='photos', null=True, on_delete=models.CASCADE) latitude = models.DecimalField(max_digits=9, decimal_places=6, null=True) @@ -163,6 +168,16 @@ def base_file(self): def base_image_path(self): return self.base_file.base_image_path + @property + def download_url(self): + library_url = 
self.library.get_library_path_store().url + if not library_url: + library_url = '/photos/' + library_path = self.library.get_library_path_store().path + if not library_path: + library_path = '/data/photos/' + return self.base_file.path.replace(library_path, library_url) + @property def dimensions(self): file = self.base_file @@ -189,8 +204,8 @@ class PhotoFile(UUIDModel, VersionedModel): thumbnailed_version = models.PositiveIntegerField(default=0) # Version from photos.utils.thumbnails.THUMBNAILER_VERSION at time of generating the required thumbnails declared in settings.THUMBNAIL_SIZES raw_processed = models.BooleanField(default=False) raw_version = models.PositiveIntegerField(null=True) - raw_external_params = models.CharField(max_length=16, blank=True, null=True) - raw_external_version = models.CharField(max_length=16, blank=True, null=True) + raw_external_params = models.CharField(max_length=32, blank=True, null=True) + raw_external_version = models.CharField(max_length=32, blank=True, null=True) def __str__(self): return str(self.path) @@ -219,6 +234,7 @@ def base_image_path(self): ('C', 'Color'), ('S', 'Style'), # See Karayev et al.: Recognizing Image Style ('G', 'Generic'), # Tags created by user + ('E', 'Event'), # Checked image taken date is any festival date. ) @@ -226,8 +242,8 @@ class Tag(UUIDModel, VersionedModel): library = models.ForeignKey(Library, related_name='tags', on_delete=models.CASCADE) name = models.CharField(max_length=128) parent = models.ForeignKey('Tag', related_name='+', null=True, on_delete=models.CASCADE) - type = models.CharField(max_length=1, choices=TAG_TYPE_CHOICES, null=True) - source = models.CharField(max_length=1, choices=SOURCE_CHOICES) + type = models.CharField(max_length=1, choices=TAG_TYPE_CHOICES, null=True, db_index=True) + source = models.CharField(max_length=1, choices=SOURCE_CHOICES, db_index=True) ordering = models.FloatField(null=True) class Meta: @@ -241,8 +257,9 @@ def __str__(self): class PhotoTag(UUIDModel, VersionedModel): photo = models.ForeignKey(Photo, related_name='photo_tags', on_delete=models.CASCADE, null=True) tag = models.ForeignKey(Tag, related_name='photo_tags', on_delete=models.CASCADE) - source = models.CharField(max_length=1, choices=SOURCE_CHOICES) - model_version = models.PositiveIntegerField(default=0) + source = models.CharField(max_length=1, choices=SOURCE_CHOICES, db_index=True) + model_version = models.PositiveIntegerField(default=0, help_text='Version number of classifier model if source is Computer (YYYYMMDD)') + retrained_model_version = models.PositiveBigIntegerField(default=0, help_text='If classifier has models that are re-trained locally (e.g. 
Face) then we want to store this too (YYYYMMDDHHMMSS)') confidence = models.FloatField() significance = models.FloatField(null=True) verified = models.BooleanField(default=False) @@ -252,6 +269,9 @@ class PhotoTag(UUIDModel, VersionedModel): position_y = models.FloatField(null=True) size_x = models.FloatField(null=True) size_y = models.FloatField(null=True) + # A place to store extra JSON data such as face feature positions for eyes, nose and mouth + extra_data = models.TextField(null=True) + deleted = models.BooleanField(default=False) class Meta: ordering = ['-significance'] @@ -271,7 +291,7 @@ def __str__(self): class Task(UUIDModel, VersionedModel): type = models.CharField(max_length=128, db_index=True) subject_id = models.UUIDField(db_index=True) - status = models.CharField(max_length=1, choices=TAG_TYPE_CHOICES, default='P', db_index=True) + status = models.CharField(max_length=1, choices=TASK_STATUS_CHOICES, default='P', db_index=True) started_at = models.DateTimeField(null=True) finished_at = models.DateTimeField(null=True) parent = models.ForeignKey('self', related_name='children', null=True, on_delete=models.CASCADE) @@ -308,7 +328,10 @@ def complete(self, next_type=None, next_subject_id=None): self.parent.complete( next_type=next_type, next_subject_id=next_subject_id) - def failed(self): + def failed(self, error=None, traceback=None): self.status = 'F' self.finished_at = timezone.now() self.save() + + if error: + logger.error(error) diff --git a/photonix/photos/schema.py b/photonix/photos/schema.py index 4af142d5..e5d9a3a7 100644 --- a/photonix/photos/schema.py +++ b/photonix/photos/schema.py @@ -1,19 +1,22 @@ +import os from django.conf import settings -import django_filters +from django.contrib.auth import get_user_model, load_backend, login from django_filters import CharFilter from graphene_django.filter import DjangoFilterConnectionField from graphene_django.types import DjangoObjectType from graphql_jwt.decorators import login_required from graphql import GraphQLError -from django.db.models import Q -from django.contrib.auth import get_user_model -from .models import Library, Camera, Lens, Photo, Tag, PhotoTag, LibraryPath, LibraryUser, PhotoFile, Task +from django.db.models import Case, When, Value, IntegerField, Q from django.contrib.auth import load_backend, login +from photonix.photos.utils.tasks import count_remaining_task +from .models import Library, Camera, Lens, Photo, Tag, PhotoTag, LibraryPath, LibraryUser, PhotoFile, Task from photonix.photos.utils.filter_photos import filter_photos_queryset, sort_photos_exposure from photonix.photos.utils.metadata import PhotoMetadata -import os +from django.db.models.functions import Lower +import django_filters import graphene +import os User = get_user_model() @@ -34,9 +37,15 @@ class Meta: class PhotoTagType(DjangoObjectType): + show_verify_icon = graphene.Boolean() + class Meta: model = PhotoTag + def resolve_show_verify_icon(self, info): + if self.tag.type == 'F' and not self.verified and self.tag.photo_tags.filter(verified=True).exists(): + return True + return False class PhotoFileType(DjangoObjectType): class Meta: @@ -60,16 +69,20 @@ class PhotoInterface(graphene.Interface): class PhotoNode(DjangoObjectType): url = graphene.String() location = graphene.String() - location_tags = graphene.List(PhotoTagType) - object_tags = graphene.List(PhotoTagType) - color_tags = graphene.List(PhotoTagType) - style_tags = graphene.List(PhotoTagType) width = graphene.Int() height = graphene.Int() generic_tags = 
graphene.List(PhotoTagType) photo_file = graphene.List(PhotoFileType) base_file_path = graphene.String() base_file_id = graphene.UUID() + download_url = graphene.String() + + color_tags = graphene.List(PhotoTagType) + location_tags = graphene.List(PhotoTagType) + person_tags = graphene.List(PhotoTagType) + style_tags = graphene.List(PhotoTagType) + object_tags = graphene.List(PhotoTagType) + event_tags = graphene.List(PhotoTagType) class Meta: model = Photo @@ -84,18 +97,6 @@ def resolve_url(self, info): size = settings.THUMBNAIL_SIZES[-1] return self.thumbnail_url(size) - def resolve_location_tags(self, info): - return self.photo_tags.filter(tag__type='L') - - def resolve_object_tags(self, info): - return self.photo_tags.filter(tag__type='O') - - def resolve_color_tags(self, info): - return self.photo_tags.filter(tag__type='C') - - def resolve_style_tags(self, info): - return self.photo_tags.filter(tag__type='S') - def resolve_width(self, info): return self.dimensions[0] @@ -114,6 +115,27 @@ def resolve_base_file_path(self, info): def resolve_base_file_id(self, info): return self.base_file.id + def resolve_download_url(self, info): + return self.download_url + + def resolve_color_tags(self, info): + return self.photo_tags.filter(tag__type='C') + + def resolve_location_tags(self, info): + return self.photo_tags.filter(tag__type='L') + + def resolve_person_tags(self, info): + return self.photo_tags.filter(tag__type='F') + + def resolve_style_tags(self, info): + return self.photo_tags.filter(tag__type='S') + + def resolve_object_tags(self, info): + return self.photo_tags.filter(tag__type='O') + + def resolve_event_tags(self, info): + return self.photo_tags.filter(tag__type='E') + class PhotoFilter(django_filters.FilterSet): multi_filter = CharFilter(method='multi_filter_filter') @@ -172,19 +194,35 @@ class Meta: model = Tag +class EventTagType(DjangoObjectType): + class Meta: + model = Tag + + class LibrarySetting(graphene.ObjectType): - """To pass fields for library settingg query api.""" + """To pass fields for library setting query api.""" library = graphene.Field(LibraryType) source_folder = graphene.String() - class PhotoMetadataFields(graphene.ObjectType): """ Metadata about photo as extracted by exiftool """ data = graphene.types.generic.GenericScalar() ok = graphene.Boolean() +class TaskType(graphene.ObjectType): + """Different type of tasks.""" + + generate_thumbnails = graphene.types.generic.GenericScalar() + process_raw = graphene.types.generic.GenericScalar() + classify_color = graphene.types.generic.GenericScalar() + classify_location = graphene.types.generic.GenericScalar() + classify_object = graphene.types.generic.GenericScalar() + classify_style = graphene.types.generic.GenericScalar() + classify_face = graphene.types.generic.GenericScalar() + + class Query(graphene.ObjectType): all_libraries = graphene.List(LibraryType) camera = graphene.Field(CameraType, id=graphene.UUID(), make=graphene.String(), model=graphene.String()) @@ -210,9 +248,11 @@ class Query(graphene.ObjectType): all_person_tags = graphene.List(PersonTagType, library_id=graphene.UUID(), multi_filter=graphene.String()) all_color_tags = graphene.List(ColorTagType, library_id=graphene.UUID(), multi_filter=graphene.String()) all_style_tags = graphene.List(StyleTagType, library_id=graphene.UUID(), multi_filter=graphene.String()) + all_event_tags = graphene.List(EventTagType, library_id=graphene.UUID(), multi_filter=graphene.String()) all_generic_tags = graphene.List(LocationTagType, library_id=graphene.UUID(), 
multi_filter=graphene.String()) library_setting = graphene.Field(LibrarySetting, library_id=graphene.UUID()) photo_file_metadata = graphene.Field(PhotoMetadataFields, photo_file_id=graphene.UUID()) + task_progress = graphene.Field(TaskType) def resolve_all_libraries(self, info, **kwargs): user = info.context.user @@ -291,7 +331,7 @@ def resolve_photo(self, info, **kwargs): @login_required def resolve_all_photos(self, info, **kwargs): user = info.context.user - return Photo.objects.filter(library__users__user=user) + return Photo.objects.filter(library__users__user=user, thumbnailed_version__isnull=False) @login_required def resolve_map_photos(self, info, **kwargs): @@ -331,8 +371,32 @@ def resolve_all_person_tags(self, info, **kwargs): photos_list = filter_photos_queryset( filters, Photo.objects.filter(library__users__user=user), kwargs.get('library_id')) - return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='P', photo_tags__photo__in=photos_list).distinct() - return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='P') + # Sort Person tags but keep "Unknown..." ones at the end + return Tag.objects.filter( + library__users__user=user, + library__id=kwargs.get('library_id'), + type='F', + photo_tags__photo__in=photos_list + ).annotate( + unknown_tag=Case( + When(name__startswith='Unknown', then=Value(1)), + default=Value(2), + output_field=IntegerField(), + ) + ).order_by("-unknown_tag", Lower('name')).distinct() + # Sort Person tags but keep "Unknown..." ones at the end + return Tag.objects.filter( + library__users__user=user, + library__id=kwargs.get('library_id'), + type='F', + photo_tags__deleted=False + ).annotate( + unknown_tag=Case( + When(name__startswith='Unknown', then=Value(1)), + default=Value(2), + output_field=IntegerField(), + ) + ).order_by("-unknown_tag", Lower('name')).distinct() def resolve_all_color_tags(self, info, **kwargs): user = info.context.user @@ -358,6 +422,18 @@ def resolve_all_style_tags(self, info, **kwargs): return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='S', photo_tags__photo__in=photos_list).distinct() return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='S') + def resolve_all_event_tags(self, info, **kwargs): + user = info.context.user + if kwargs.get('multi_filter'): + if not kwargs.get('library_id'): + raise GraphQLError('library_id not supplied!') + filters = kwargs.get('multi_filter').split(' ') + photos_list = filter_photos_queryset( + filters, Photo.objects.filter(library__users__user=user), + kwargs.get('library_id')) + return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='E', photo_tags__photo__in=photos_list).distinct() + return Tag.objects.filter(library__users__user=user, library__id=kwargs.get('library_id'), type='E') + def resolve_all_generic_tags(self, info, **kwargs): user = info.context.user if kwargs.get('multi_filter'): @@ -392,6 +468,17 @@ def resolve_photo_file_metadata(self, info, **kwargs): } return {'ok': False} + def resolve_task_progress(self, info, **kwargs): + """Return No. 
of remaining tasks for each of the different task types.""" + return { + "generate_thumbnails": count_remaining_task('generate_thumbnails'), + "process_raw": count_remaining_task('process_raw'), + "classify_color": count_remaining_task('classify.color'), + "classify_location": count_remaining_task('classify.location'), + "classify_object": count_remaining_task('classify.object'), + "classify_style": count_remaining_task('classify.style'), + "classify_face": count_remaining_task('classify.face')} + class LibraryInput(graphene.InputObjectType): """LibraryInput to take input of library fields from frontend.""" @@ -400,6 +487,7 @@ class LibraryInput(graphene.InputObjectType): classification_location_enabled = graphene.Boolean() classification_style_enabled = graphene.Boolean() classification_object_enabled = graphene.Boolean() + classification_face_enabled = graphene.Boolean() source_folder = graphene.String(required=False) user_id = graphene.ID() library_id = graphene.ID() @@ -529,6 +617,37 @@ def mutate(root, info, input=None): return UpdateLibraryObjectEnabled(ok=ok, classification_object_enabled=None) +class UpdateLibraryFaceEnabled(graphene.Mutation): + """To update data in database that will be passed from frontend FaceEnabled api.""" + + class Arguments: + """To set arguments for the mutate method.""" + + input = LibraryInput(required=False) + + ok = graphene.Boolean() + classification_face_enabled = graphene.Boolean() + + @staticmethod + def mutate(root, info, input=None): + """Method to save the updated data for FaceEnabled api.""" + ok = False + user = info.context.user + libraries = Library.objects.filter(users__user=user, users__owner=True, id=input.library_id) + if libraries and str(input.get('classification_face_enabled')) != 'None': + library_obj = libraries[0] + library_obj.classification_face_enabled = input.classification_face_enabled + library_obj.save() + ok = True + return UpdateLibraryFaceEnabled( + ok=ok, + classification_face_enabled=library_obj.classification_face_enabled) + if not libraries: + raise Exception('User is not the owner of library!') + else: + return UpdateLibraryFaceEnabled(ok=ok, classification_face_enabled=None) + + class UpdateLibrarySourceFolder(graphene.Mutation): """To update data in database that will be passed from frontend SourceFolder api.""" @@ -674,6 +793,7 @@ def mutate(self, info, input=None): library_obj.classification_location_enabled = input.classification_location_enabled library_obj.classification_style_enabled = input.classification_style_enabled library_obj.classification_object_enabled = input.classification_object_enabled + library_obj.classification_face_enabled = input.classification_face_enabled library_obj.save() user = User.objects.get(pk=input.user_id) user.has_configured_image_analysis = True @@ -786,11 +906,84 @@ def mutate(self, info, selected_photo_file_id=None): return ChangePreferredPhotoFile(ok=True) +class EditFaceTag(graphene.Mutation): + """Face tagging for face Detection.""" + + class Arguments: + """Input arguments which will pass from frontend.""" + + photo_tag_id = graphene.ID() + new_name = graphene.String() + + ok = graphene.Boolean() + + @staticmethod + def mutate(self, info, photo_tag_id=None, new_name=None): + """Mutation to create or update face tags and assign them to photoTag.""" + obj = Tag.objects.filter(name=new_name, type='F').first() + photo_tag = PhotoTag.objects.get(id=photo_tag_id) + already_assigned_tag = photo_tag.tag + if obj: + photo_tag.tag = obj + photo_tag.save() + 
already_assigned_tag.photo_tags.all().count() or already_assigned_tag.delete() + else: + already_assigned_tag.name = new_name + already_assigned_tag.save() + photo_tag.verified = True + photo_tag.confidence = 1 + photo_tag.deleted = False + photo_tag.save() + return EditFaceTag(ok=True) + + +class BlockFaceTag(graphene.Mutation): + """Face tagging for face Detection.""" + + class Arguments: + """Input arguments which will pass from frontend.""" + + photo_tag_id = graphene.ID() + + ok = graphene.Boolean() + + @staticmethod + def mutate(self, info, photo_tag_id=None, new_name=None): + """Mutation to block a face 'F' type photoTag.""" + photo_tag = PhotoTag.objects.get(id=photo_tag_id) + photo_tag.deleted = True + photo_tag.verified = False + photo_tag.confidence = 0 + photo_tag.save() + return BlockFaceTag(ok=True) + + +class VerifyPhoto(graphene.Mutation): + """Face tagging for face Detection.""" + + class Arguments: + """Input arguments which will pass from frontend.""" + + photo_tag_id = graphene.ID() + + ok = graphene.Boolean() + + @staticmethod + def mutate(self, info, photo_tag_id=None, new_name=None): + """Mutation to set verify a face 'F' type photoTag.""" + photo_tag = PhotoTag.objects.get(id=photo_tag_id) + photo_tag.verified = True + photo_tag.confidence = 1 + photo_tag.save() + return VerifyPhoto(ok=True) + + class Mutation(graphene.ObjectType): update_color_enabled = UpdateLibraryColorEnabled.Field() update_location_enabled = UpdateLibraryLocationEnabled.Field() update_style_enabled = UpdateLibraryStyleEnabled.Field() update_object_enabled = UpdateLibraryObjectEnabled.Field() + update_face_enabled = UpdateLibraryFaceEnabled.Field() update_source_folder = UpdateLibrarySourceFolder.Field() create_library = CreateLibrary.Field() Photo_importing = PhotoImporting.Field() @@ -799,3 +992,6 @@ class Mutation(graphene.ObjectType): create_generic_tag = CreateGenricTag.Field() remove_generic_tag = RemoveGenericTag.Field() change_preferred_photo_file = ChangePreferredPhotoFile.Field() + edit_face_tag = EditFaceTag.Field() + block_face_tag = BlockFaceTag.Field() + verify_photo = VerifyPhoto.Field() diff --git a/photonix/photos/utils/classification.py b/photonix/photos/utils/classification.py index 663d5775..d3a37842 100644 --- a/photonix/photos/utils/classification.py +++ b/photonix/photos/utils/classification.py @@ -1,17 +1,23 @@ import queue import threading from time import sleep +import traceback from django.db import transaction from django.utils import timezone + from photonix.photos.models import Task, Photo from photonix.photos.utils.tasks import requeue_stuck_tasks +from photonix.web.utils import logger + CLASSIFIERS = [ 'color', + 'event', 'location', - 'object', + 'face', 'style', + 'object', ] @@ -59,11 +65,13 @@ def __worker(self): def __process_task(self, task): try: - print(f'Running task: {task.type} - {task.subject_id}') + logger.info(f'Running task: {task.type} - {task.subject_id}') task.start() self.runner(task.subject_id) task.complete() - except: + except Exception: + logger.error(f'Error processing task: {task.type} - {task.subject_id}') + traceback.print_exc() task.failed() def __clean_up(self): @@ -74,7 +82,7 @@ def __clean_up(self): t.join() def run(self, loop=True): - print('Starting {} {} workers\n'.format(self.num_workers, self.task_type)) + logger.info('Starting {} {} workers'.format(self.num_workers, self.task_type)) if self.num_workers > 1: for i in range(self.num_workers): @@ -89,15 +97,17 @@ def run(self, loop=True): task_queryset = 
Task.objects.filter(library__classification_color_enabled=True, type=self.task_type, status='P') elif self.task_type == 'classify.location': task_queryset = Task.objects.filter(library__classification_location_enabled=True, type=self.task_type, status='P') - elif self.task_type == 'classify.object': - task_queryset = Task.objects.filter(library__classification_object_enabled=True, type=self.task_type, status='P') + elif self.task_type == 'classify.face': + task_queryset = Task.objects.filter(library__classification_face_enabled=True, type=self.task_type, status='P') elif self.task_type == 'classify.style': task_queryset = Task.objects.filter(library__classification_style_enabled=True, type=self.task_type, status='P') + elif self.task_type == 'classify.object': + task_queryset = Task.objects.filter(library__classification_object_enabled=True, type=self.task_type, status='P') else: task_queryset = Task.objects.filter(type=self.task_type, status='P') for task in task_queryset[:8]: if self.num_workers > 1: - print('putting task') + logger.debug('putting task') self.queue.put(task) else: self.__process_task(task) diff --git a/photonix/photos/utils/db.py b/photonix/photos/utils/db.py index 93c95193..7d100985 100644 --- a/photonix/photos/utils/db.py +++ b/photonix/photos/utils/db.py @@ -1,16 +1,38 @@ -import mimetypes -import os -import re from datetime import datetime from decimal import Decimal +import imghdr +import mimetypes +import os, time +import re +import subprocess from django.utils.timezone import utc from photonix.photos.models import Camera, Lens, Photo, PhotoFile, Task, Library, Tag, PhotoTag -from photonix.photos.utils.metadata import (PhotoMetadata, parse_datetime, parse_gps_location) +from photonix.photos.utils.metadata import PhotoMetadata, parse_datetime, parse_gps_location, get_mimetype +from photonix.web.utils import logger + + +MIMETYPE_WHITELIST = [ + # This list is in addition to the filetypes detected by imghdr and 'dcraw -i' + 'image/heif', + 'image/heif-sequence', + 'image/heic', + 'image/heic-sequence', + 'image/avif', + 'image/avif-sequence', +] def record_photo(path, library, inotify_event_type=None): + logger.info(f'Recording photo {path}') + + mimetype = get_mimetype(path) + + if not imghdr.what(path) and not mimetype in MIMETYPE_WHITELIST and subprocess.run(['dcraw', '-i', path]).returncode: + logger.error(f'File is not a supported type: {path} ({mimetype})') + return None + if type(library) == Library: library_id = library.id else: @@ -33,17 +55,20 @@ def record_photo(path, library, inotify_event_type=None): metadata = PhotoMetadata(path) date_taken = None - possible_date_keys = ['Date/Time Original', 'Date Time Original', 'Date/Time', 'Date Time', 'GPS Date/Time', 'Modify Date', 'File Modification Date/Time'] + possible_date_keys = ['Create Date', 'Date/Time Original', 'Date Time Original', 'Date/Time', 'Date Time', 'GPS Date/Time', 'File Modification Date/Time'] for date_key in possible_date_keys: date_taken = parse_datetime(metadata.get(date_key)) if date_taken: break + # If EXIF data not found. 
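+ # fall back to the file's creation/change time (ctime) reported by the filesystem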
+ date_taken = date_taken or datetime.strptime(time.ctime(os.path.getctime(path)), "%a %b %d %H:%M:%S %Y") camera = None - camera_make = metadata.get('Make', '') + camera_make = metadata.get('Make', '')[:Camera.make.field.max_length] camera_model = metadata.get('Camera Model Name', '') if camera_model: camera_model = camera_model.replace(camera_make, '').strip() + camera_model = camera_model[:Camera.model.field.max_length] if camera_make and camera_model: try: camera = Camera.objects.get(library_id=library_id, make=camera_make, model=camera_model) @@ -108,15 +133,15 @@ def record_photo(path, library, inotify_event_type=None): photo = Photo( library_id=library_id, taken_at=date_taken, - taken_by=metadata.get('Artist') or None, + taken_by=metadata.get('Artist', '')[:Photo.taken_by.field.max_length] or None, aperture=aperture, - exposure=metadata.get('Exposure Time') or None, + exposure=metadata.get('Exposure Time', '')[:Photo.exposure.field.max_length] or None, iso_speed=iso_speed, focal_length=metadata.get('Focal Length') and metadata.get('Focal Length').split(' ', 1)[0] or None, flash=metadata.get('Flash') and 'on' in metadata.get('Flash').lower() or False, - metering_mode=metadata.get('Metering Mode') or None, - drive_mode=metadata.get('Drive Mode') or None, - shooting_mode=metadata.get('Shooting Mode') or None, + metering_mode=metadata.get('Metering Mode', '')[:Photo.metering_mode.field.max_length] or None, + drive_mode=metadata.get('Drive Mode', '')[:Photo.drive_mode.field.max_length] or None, + shooting_mode=metadata.get('Shooting Mode', '')[:Photo.shooting_mode.field.max_length] or None, camera=camera, lens=lens, latitude=latitude, @@ -129,7 +154,7 @@ def record_photo(path, library, inotify_event_type=None): for subject in metadata.get('Subject', '').split(','): subject = subject.strip() if subject: - tag = Tag.objects.create(library_id=library_id, name=subject, type="G") + tag, _ = Tag.objects.get_or_create(library_id=library_id, name=subject, type="G") PhotoTag.objects.create( photo=photo, tag=tag, @@ -152,7 +177,7 @@ def record_photo(path, library, inotify_event_type=None): photo_file.path = path photo_file.width = width photo_file.height = height - photo_file.mimetype = mimetypes.guess_type(path)[0] + photo_file.mimetype = mimetype photo_file.file_modified_at = file_modified_at photo_file.bytes = os.stat(path).st_size photo_file.preferred = False # TODO diff --git a/photonix/photos/utils/metadata.py b/photonix/photos/utils/metadata.py index 0c45b79a..a4016fc5 100644 --- a/photonix/photos/utils/metadata.py +++ b/photonix/photos/utils/metadata.py @@ -1,8 +1,9 @@ +from datetime import datetime +from dateutil.parser import parse as parse_date +import mimetypes import os import re from subprocess import Popen, PIPE -from datetime import datetime -from dateutil.parser import parse as parse_date from django.utils.timezone import utc @@ -11,7 +12,8 @@ class PhotoMetadata(object): def __init__(self, path): self.data = {} try: - result = Popen(['exiftool', path], stdout=PIPE, stdin=PIPE, stderr=PIPE).communicate()[0].decode('utf-8') + # exiftool produces data such as MIME Type for non-photos too + result = Popen(['exiftool', path], stdout=PIPE, stdin=PIPE, stderr=PIPE).communicate()[0].decode('utf-8', 'ignore') except UnicodeDecodeError: result = '' for line in str(result).split('\n'): @@ -22,6 +24,10 @@ def __init__(self, path): except ValueError: pass + # Some file MIME Types cannot be identified by exiftool so we fall back to Python's mimetypes library so that the get_mimetype() function 
below is universal + if not self.data.get('MIME Type'): + self.data['MIME Type'] = mimetypes.guess_type(path)[0] + def get(self, attribute, default=None): return self.data.get(attribute, default) @@ -37,10 +43,13 @@ def parse_datetime(date_str): try: return datetime.strptime(date_str, '%Y:%m:%d %H:%M:%S').replace(tzinfo=utc) except ValueError: - parsed_date = parse_date(date_str) - if not parsed_date.tzinfo: - parsed_date = parsed_date.replace(tzinfo=utc) - return parsed_date + try: + parsed_date = parse_date(date_str) + if not parsed_date.tzinfo: + parsed_date = parsed_date.replace(tzinfo=utc) + return parsed_date + except ValueError: + return None def parse_gps_location(gps_str): @@ -66,11 +75,19 @@ def get_datetime(path): ''' # TODO: Use 'GPS Date/Time' if available as it's more accurate - # First try the date in the metadate + # First try the date in the metadata metadata = PhotoMetadata(path) date_str = metadata.get('Date/Time Original') if date_str: - return parse_datetime(date_str) + parsed_datetime = parse_datetime(date_str) + if parsed_datetime: + return parsed_datetime + + date_str = metadata.get('Create Date') + if date_str: + parsed_datetime = parse_datetime(date_str) + if parsed_datetime: + return parsed_datetime # If there was not date metadata, try to infer it from filename fn = os.path.split(path)[1] @@ -78,10 +95,14 @@ def get_datetime(path): if not matched: matched = re.search(r'\D((19|20)[0-9]{2})([0-9]{2})([0-9]{2})\D', fn) if matched: - # import pdb; pdb.set_trace() date_str = '{}-{}-{}'.format(matched.group(1), matched.group(3), matched.group(4)) return datetime.strptime(date_str, '%Y-%m-%d') - return None + + # Otherwise get file creation time + try: + return datetime.fromtimestamp(os.stat(path).st_ctime).replace(tzinfo=utc) + except: + return None def get_dimensions(path): @@ -90,9 +111,8 @@ def get_dimensions(path): return (int(metadata.data['Image Width']), int(metadata.data['Image Height'])) return (None, None) + def get_mimetype(path): - # Done - """Pulls the MIME Type from the given path""" metadata = PhotoMetadata(path) if metadata.data.get('MIME Type'): return metadata.data.get('MIME Type') diff --git a/photonix/photos/utils/organise.py b/photonix/photos/utils/organise.py index 74a6b1cd..175f3447 100644 --- a/photonix/photos/utils/organise.py +++ b/photonix/photos/utils/organise.py @@ -11,6 +11,7 @@ find_new_file_name, mkdir_p) from photonix.photos.utils.metadata import get_datetime + SYNOLOGY_THUMBNAILS_DIR_NAME = '/@eaDir' diff --git a/photonix/photos/utils/raw.py b/photonix/photos/utils/raw.py index a46e9c61..2ceeac2b 100644 --- a/photonix/photos/utils/raw.py +++ b/photonix/photos/utils/raw.py @@ -9,6 +9,7 @@ from django.conf import settings from photonix.photos.models import Photo, PhotoFile, Task +from photonix.web.utils import logger from .metadata import get_dimensions, get_mimetype @@ -60,7 +61,7 @@ def process_raw_task(photo_file_id, task): output_path, version, process_params, external_version = generate_jpeg(photo_file.path) if not output_path: - task.failed() + task.failed('Could not generate JPEG') return if not os.path.isdir(settings.PHOTO_RAW_PROCESSED_DIR): @@ -103,22 +104,29 @@ def __get_exiftool_image(temp_dir, basename): return exiftool_files def __has_acceptable_dimensions(original_image_path, new_image_path, accept_empty_original_dimensions=False): + logger.debug('Checking image dimensions') original_image_dimensions = get_dimensions(original_image_path) + logger.debug(f'Original image dimensions: {original_image_dimensions}') 
new_image_dimensions = get_dimensions(new_image_path) + logger.debug(f'New image dimensions: {new_image_dimensions}') # We don't know the original dimensions so have nothing to compare to if original_image_dimensions == (None, None): if accept_empty_original_dimensions: + logger.debug('No original dimensions, accepting new dimensions') return True else: + logger.debug('No original dimensions, rejecting new dimensions') return False # Embedded image can't be the full resolution - if new_image_dimensions[0] < 512 or new_image_dimensions[1] < 512: + if not new_image_dimensions[0] or not new_image_dimensions[1] or new_image_dimensions[0] < 512 or new_image_dimensions[1] < 512: + logger.debug('Dimensions are too small') return False # Embedded image is exactly the same dimensions if original_image_dimensions == new_image_dimensions: + logger.debug('Dimensions match exactly') return True # Embedded image within 95% of the raw width and height @@ -126,8 +134,10 @@ def __has_acceptable_dimensions(original_image_path, new_image_path, accept_empt and original_image_dimensions[1] / new_image_dimensions[1] > 0.95 \ and new_image_dimensions[0] / original_image_dimensions[0] > 0.95 \ and new_image_dimensions[1] / original_image_dimensions[1] > 0.95: + logger.debug('Dimensions match closely enough') return True + logger.debug('Dimensions are not good') return False @@ -152,7 +162,34 @@ def __dcraw_version(): return +def __heif_convert_version(): + output = subprocess.Popen(['dpkg', '-s', 'libheif-examples'], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0].decode('utf-8') + for line in output.split('\n'): + if 'Version: ' in line: + try: + return re.search(r'([0-9]+.[0-9]+.[0-9]+)', line).group(1) + except AttributeError: + return + + +def __exiftool_version(): + output = subprocess.Popen(['dpkg', '-s', 'libimage-exiftool-perl'], stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.PIPE).communicate()[0].decode('utf-8') + for line in output.split('\n'): + if 'Version: ' in line: + try: + return re.search(r'([0-9]+.[0-9]+.[0-9]+)', line).group(1) + except AttributeError: + return + +def __delete_file_silently(path): + try: + os.remove(path) + except FileNotFoundError: + pass + + def generate_jpeg(path): + logger.debug(f'Generating JPEG for raw file {path}') basename = os.path.basename(path) temp_dir = tempfile.mkdtemp() temp_input_path = Path(temp_dir) / basename @@ -160,10 +197,12 @@ def generate_jpeg(path): valid_image = False process_params = None + external_version = None # Handle Canon's CR3 format since their thumbnails are proprietary. 
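+ # The fallback chain below tries exiftool's embedded JpgFromRaw for CR3 files, heif-convert for HEIF/HEIC, then dcraw -e / -p embed / -w, keeping the first candidate whose dimensions look plausible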
mimetype = get_mimetype(temp_input_path) if mimetype == 'image/x-canon-cr3': + logger.debug('File type detected as Canon Raw v3') subprocess.Popen([ 'exiftool', '-b', '-JpgFromRaw', '-w', 'jpg', '-ext', 'CR3', temp_input_path, '-execute', '-tagsfromfile', temp_input_path, @@ -180,63 +219,85 @@ def generate_jpeg(path): temp_output_path = exiftool_output['output'] else: temp_output_path = None + process_params = 'exiftool -b -JpgFromRaw' + external_version = __exiftool_version() + elif mimetype in ['image/heif', 'image/heic']: + logger.debug('File type detected as HEIF/HEIC') + temp_output_path = Path(temp_dir) / 'out.jpg' + subprocess.run(['heif-convert', '-q', '90', temp_input_path, temp_output_path]) + process_params = 'heif-convert -q 90' + external_version = __heif_convert_version() else: - # First try to extract the JPEG that might be inside the raw file + logger.debug('Attempting to extract JPEG using dcraw') + # Try to extract the JPEG that might be inside the raw file subprocess.run(['dcraw', '-e', temp_input_path]) temp_output_path = __get_generated_image(temp_dir, basename) + process_params = 'dcraw -e' + external_version = __dcraw_version() # Check the JPEGs dimensions are close enough to the raw's dimensions if temp_output_path: if __has_acceptable_dimensions(temp_input_path, temp_output_path): + logger.debug('JPEG file looks good so far') valid_image = True - process_params = 'dcraw -e' else: - os.remove(temp_output_path) + __delete_file_silently(temp_output_path) # Next try to use embedded profile to generate an image if not valid_image: + logger.debug('Attempting to generate JPEG with dcraw using embedded color profile') subprocess.run(['dcraw', '-p embed', temp_input_path]) temp_output_path = __get_generated_image(temp_dir, basename) if temp_output_path: if __has_acceptable_dimensions(temp_input_path, temp_output_path): + logger.debug('JPEG file looks good so far') valid_image = True process_params = 'dcraw -p embed' else: - os.remove(temp_output_path) + __delete_file_silently(temp_output_path) # Finally try to use the embedded whitebalance to generate an image if not valid_image: + logger.debug('Attempting to generate JPEG with dcraw using embedded white balance') subprocess.run(['dcraw', '-w', temp_input_path]) temp_output_path = __get_generated_image(temp_dir, basename) if temp_output_path: if __has_acceptable_dimensions(temp_input_path, temp_output_path, True): + logger.debug('JPEG file looks good so far') valid_image = True process_params = 'dcraw -w' else: - os.remove(temp_output_path) + __delete_file_silently(temp_output_path) # If extracted image isn't a JPEG then we need to convert it if valid_image: valid_image = identified_as_jpeg(temp_output_path) if not valid_image: + logger.debug('JPEG didn\'t pass test, attempting bitmap conversion') jpeg_path = tempfile.mktemp() bitmap_to_jpeg(temp_output_path, jpeg_path) if identified_as_jpeg(jpeg_path): + logger.debug('JPEG file now passes test') temp_output_path = jpeg_path valid_image = True # Move the outputted file to a new temporary location if valid_image: + logger.debug('I\'m happy with the JPEG so moving it to a new location') final_path = tempfile.mktemp() os.rename(temp_output_path, final_path) # Delete the temporary working directory + logger.debug('Deleting temporary files') shutil.rmtree(temp_dir) if valid_image: - return (final_path, RAW_PROCESS_VERSION, process_params, __dcraw_version()) + logger.debug(f'Returning info about JPEG which is temporarily located here: {final_path}') + return (final_path, 
RAW_PROCESS_VERSION, process_params, external_version) + + logger.error('Couldn\'t make JPEG from raw file') return (None, RAW_PROCESS_VERSION, None, None) diff --git a/photonix/photos/utils/redis.py b/photonix/photos/utils/redis.py new file mode 100644 index 00000000..a365414d --- /dev/null +++ b/photonix/photos/utils/redis.py @@ -0,0 +1,11 @@ +import os + +import redis + + +redis_connection = redis.Redis( + host=os.environ.get('REDIS_HOST', '127.0.0.1'), + port=int(os.environ.get('REDIS_PORT', '6379')), + db=int(os.environ.get('REDIS_DB', '0')), + password=os.environ.get('REDIS_PASSWORD') +) diff --git a/photonix/photos/utils/tasks.py b/photonix/photos/utils/tasks.py index 40a0f47a..2f1c10bf 100644 --- a/photonix/photos/utils/tasks.py +++ b/photonix/photos/utils/tasks.py @@ -1,15 +1,19 @@ from datetime import timedelta from django.utils import timezone - +from django.db.models import Q from photonix.photos.models import Task - def requeue_stuck_tasks(task_type, age_hours=24, max_num=8): # Set old, failed jobs to Pending - for task in Task.objects.filter(type=task_type, status='S', updated_at__lt=timezone.now() - timedelta(hours=24))[:8]: + for task in Task.objects.filter(type=task_type, status='S', updated_at__lt=timezone.now() - timedelta(hours=24))[:max_num]: task.status = 'P' task.save() - for task in Task.objects.filter(type=task_type, status='F', updated_at__lt=timezone.now() - timedelta(hours=24))[:8]: + for task in Task.objects.filter(type=task_type, status='F', updated_at__lt=timezone.now() - timedelta(hours=24))[:max_num]: task.status = 'P' task.save() + +def count_remaining_task(task_type): + """Return the count of remaining tasks of the given type.""" + return { + 'remaining': Task.objects.filter(Q(type=task_type), Q(status='P') | Q(status='S')).count()} diff --git a/photonix/photos/utils/thumbnails.py b/photonix/photos/utils/thumbnails.py index 8225e2dd..acd16b5d 100644 --- a/photonix/photos/utils/thumbnails.py +++ b/photonix/photos/utils/thumbnails.py @@ -8,7 +8,6 @@ import numpy as np from django.conf import settings -from django.utils import timezone from photonix.photos.models import Photo, PhotoFile, Task from photonix.photos.utils.metadata import PhotoMetadata @@ -29,7 +28,7 @@ def generate_thumbnails_for_photo(photo, task): try: photo = Photo.objects.get(id=photo) except Photo.DoesNotExist: - task.failed() + task.failed(f'Photo instance does not exist with id={photo}') return for thumbnail in settings.THUMBNAIL_SIZES: diff --git a/photonix/web/settings.py b/photonix/web/settings.py index 8428b0c5..45ba9241 100644 --- a/photonix/web/settings.py +++ b/photonix/web/settings.py @@ -14,13 +14,13 @@ import os from pathlib import Path -from django.core.management import utils +from .utils import get_secret_key # Build paths inside the project like this: os.path.join(BASE_DIR, ...) 
BASE_DIR = str(Path(__file__).parent.parent.resolve()) -SECRET_KEY = utils.get_random_secret_key() +SECRET_KEY = get_secret_key() DEBUG = os.environ.get('ENV', 'prd') != 'prd' @@ -88,9 +88,50 @@ 'NAME': os.environ.get('POSTGRES_DB', 'photonix'), 'USER': os.environ.get('POSTGRES_USER', 'postgres'), 'PASSWORD': os.environ.get('POSTGRES_PASSWORD', 'password'), + 'PORT': int(os.environ.get('POSTGRES_PORT', '5432')), } } +LOGGING = { + 'version': 1, + 'disable_existing_loggers': False, + 'formatters': { + 'color': { + '()': 'colorlog.ColoredFormatter', + 'format': '%(log_color)s%(asctime)s %(levelname)-8s %(message)s', + 'log_colors': { + 'DEBUG': 'cyan', + 'INFO': 'green', + 'WARNING': 'yellow', + 'ERROR': 'red', + 'CRITICAL': 'white,bg_red', + }, + } + }, + 'handlers': { + 'console': { + 'class': 'logging.StreamHandler', + 'formatter': 'color', + }, + }, + 'root': { + 'handlers': ['console'], + 'level': 'WARNING', + }, + 'loggers': { + 'django': { + 'handlers': ['console'], + 'level': os.getenv('DJANGO_LOG_LEVEL', 'WARNING'), + 'propagate': False, + }, + 'photonix': { + 'handlers': ['console'], + 'level': os.getenv('LOG_LEVEL', 'INFO'), + 'propagate': False, + }, + }, +} + AUTHENTICATION_BACKENDS = [ 'graphql_jwt.backends.JSONWebTokenBackend', 'django.contrib.auth.backends.ModelBackend', @@ -151,8 +192,8 @@ # Width, height, crop method, JPEG quality, whether it should be generated upon upload, force accurate gamma-aware sRGB resizing (256, 256, 'cover', 50, True, True), # Square thumbnails # We use the largest dimension for both dimensions as they won't crop and some with in portrait mode - (960, 960, 'contain', 75, False, False), # 960px - (1920, 1920, 'contain', 75, False, False), # 2k + # (960, 960, 'contain', 75, False, False), # 960px + # (1920, 1920, 'contain', 75, False, False), # 2k (3840, 3840, 'contain', 75, False, False), # 4k ] diff --git a/photonix/web/utils.py b/photonix/web/utils.py new file mode 100644 index 00000000..08ecc39c --- /dev/null +++ b/photonix/web/utils.py @@ -0,0 +1,37 @@ +import logging +import os + +from django.core.management import utils +from redis_lock import Lock + +from photonix.photos.utils.redis import redis_connection + + +logger = logging.getLogger('photonix') + + +def get_secret_key(): + # To avoid each installation having the same Django SECRET_KEY we generate + # a random one and store it in Redis. We have to store it somewhere + # central like Redis because if each worker generated its own it would + # cause problems (like JWT "Error decoding signature"). 
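+ # The first worker to start acquires the Redis lock below, generates the key and stores it under 'django_secret_key'; every other worker then reads back that same value, so tokens issued by one worker validate on all the others.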
+ + secret_key = None + + if 'DJANGO_SECRET_KEY' in os.environ: + secret_key = os.environ.get('DJANGO_SECRET_KEY') + else: + if redis_connection.exists('django_secret_key'): + secret_key = redis_connection.get('django_secret_key').decode('utf-8') + else: + # Make sure only first worker generates the key and others get from Redis + with Lock(redis_connection, 'django_secret_key_generation_lock'): + if redis_connection.exists('django_secret_key'): + secret_key = redis_connection.get('django_secret_key').decode('utf-8') + else: + secret_key = utils.get_random_secret_key() + redis_connection.set('django_secret_key', secret_key.encode('utf-8')) + + if not secret_key: + raise EnvironmentError('No secret key available') + return secret_key diff --git a/requirements.txt b/requirements.txt index c61054fe..0fb06b84 100644 --- a/requirements.txt +++ b/requirements.txt @@ -2,10 +2,12 @@ numpy==1.19.2 scipy==1.4.1 matplotlib==3.1.2 tensorflow==2.4.1 +opencv-python==4.5.1.48 +annoy==1.17.0 -Django==3.0.14 +Django==3.2.3 django-cors-headers==3.2.1 -django-filter==2.2.0 +django-filter==2.4.0 PyJWT==1.7.1 django-graphql-jwt==0.3.0 @@ -32,3 +34,4 @@ pytest-django==3.8.0 mock==3.0.5 factory-boy==2.12.0 coverage==5.0.3 +colorlog==5.0.1 diff --git a/system/cron.d/retrain_face_similarity_index b/system/cron.d/retrain_face_similarity_index new file mode 100644 index 00000000..3e6877ac --- /dev/null +++ b/system/cron.d/retrain_face_similarity_index @@ -0,0 +1 @@ +*/5 * * * * root exec /bin/bash -c ". /run/supervisord.env; python /srv/photonix/manage.py retrain_face_similarity_index" diff --git a/system/nginx_dev.conf b/system/nginx_dev.conf index 595212cc..eaebb9e9 100644 --- a/system/nginx_dev.conf +++ b/system/nginx_dev.conf @@ -14,12 +14,7 @@ events { http { include /etc/nginx/mime.types; default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /dev/stdout; + access_log off; sendfile on; tcp_nopush on; @@ -40,19 +35,16 @@ http { location ~ ^/(favicon.png|manifest.json|logo.svg) { root /srv/ui/public; - access_log off; expires 1d; } location /photos { root /data; - access_log off; expires 1d; } location /thumbnails { root /data/cache; - access_log off; expires 1d; } diff --git a/system/nginx_prd.conf b/system/nginx_prd.conf index 93d94733..d671d81f 100644 --- a/system/nginx_prd.conf +++ b/system/nginx_prd.conf @@ -14,12 +14,7 @@ events { http { include /etc/nginx/mime.types; default_type application/octet-stream; - - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; - - access_log /dev/stdout; + access_log off; sendfile on; tcp_nopush on; @@ -40,19 +35,16 @@ http { location /photos { root /data; - access_log off; expires 1d; } location /thumbnails { root /data/cache; - access_log off; expires 1d; } location /static-collected { root /srv; - access_log off; expires 1d; } @@ -71,7 +63,6 @@ http { location / { root /srv/ui/build; - access_log off; expires 1d; ssi on; try_files $uri /index.html =404; diff --git a/system/run.sh b/system/run.sh index 8d50b807..d8e284a5 100755 --- a/system/run.sh +++ b/system/run.sh @@ -17,18 +17,15 @@ if [ "${ADMIN_PASSWORD}" != "" ]; then python /srv/photonix/manage.py create_admin_from_env fi -if [ "${DEMO}" = "1" ]; then - echo "Ensuring demo user, library and photos are created as we're running with 
DEMO=1 environment variable" +if [ "${DEMO}" = "1" ] || [ "${SAMPLE_DATA}" = "1" ]; then + echo "Ensuring demo user, library and photos are created as we're running with DEMO=1 or SAMPLE_DATA=1 environment variable" python /srv/photonix/manage.py import_demo_photos fi ->&2 echo "Scanning for new photos" -python /srv/photonix/manage.py rescan_photos - >&2 echo "Resetting Redis lock" python /srv/photonix/manage.py reset_redis_locks ->&2 echo "Reschedule any regeneration of thumbnails or analysis jobs" +>&2 echo "Rescheduling any required upgrade-related tasks" python /srv/photonix/manage.py housekeeping >&2 echo "Starting supervisor" diff --git a/system/supervisord.conf b/system/supervisord.conf index de87ddc0..92b2c5b2 100644 --- a/system/supervisord.conf +++ b/system/supervisord.conf @@ -29,9 +29,18 @@ stdout_logfile=/dev/stdout stderr_logfile_maxbytes=0 stdout_logfile_maxbytes=0 +[program:cron] +command = /bin/bash -c "declare -p | grep -Ev '^declare -[[:alpha:]]*r' > /run/supervisord.env && /usr/sbin/cron -f -L 15" +stderr_logfile=/dev/stderr +stdout_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_logfile_maxbytes=0 +autorestart = true +priority = 20 + [program:webpack] command=/srv/system/run_webpack_server.sh -startsecs=10 +startsecs=0 directory=/srv/ui stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -40,7 +49,7 @@ stdout_logfile_maxbytes=0 [program:storybook] command=/srv/system/run_storybook.sh -startsecs=30 +startsecs=0 directory=/srv/ui stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -49,7 +58,7 @@ stdout_logfile_maxbytes=0 [program:watch_photos] command=bash -c "nice -n 16 python /srv/photonix/manage.py watch_photos" -startsecs=12 +startsecs=10 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -58,7 +67,7 @@ stdout_logfile_maxbytes=0 [program:raw_scheduler] command=bash -c "sleep 5 && nice -n 17 python /srv/photonix/manage.py raw_scheduler" -startsecs=14 +startsecs=15 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -67,7 +76,7 @@ stdout_logfile_maxbytes=0 [program:raw_processor] command=bash -c "sleep 6 && nice -n 17 python /srv/photonix/manage.py raw_processor" -startsecs=15 +startsecs=16 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -76,7 +85,7 @@ stdout_logfile_maxbytes=0 [program:thumbnail_scheduler] command=bash -c "sleep 7 && nice -n 17 python /srv/photonix/manage.py thumbnail_processor" -startsecs=16 +startsecs=17 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -85,7 +94,7 @@ stdout_logfile_maxbytes=0 [program:classification_scheduler] command=bash -c "sleep 8 && nice -n 18 python /srv/photonix/manage.py classification_scheduler" -startsecs=17 +startsecs=18 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -94,7 +103,7 @@ stdout_logfile_maxbytes=0 [program:classification_color_processor] command=bash -c "sleep 9 && nice -n 19 python /srv/photonix/manage.py classification_color_processor" -startsecs=18 +startsecs=19 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -103,7 +112,16 @@ stdout_logfile_maxbytes=0 [program:classification_location_processor] command=bash -c "sleep 10 && nice -n 19 python /srv/photonix/manage.py classification_location_processor" -startsecs=19 +startsecs=20 +environment=PYTHONPATH=/srv +stderr_logfile=/dev/stderr +stdout_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_logfile_maxbytes=0 + 
+[program:classification_face_detection_processor] +command=bash -c "sleep 11 && nice -n 19 python /srv/photonix/manage.py classification_face_processor" +startsecs=21 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -111,8 +129,8 @@ stderr_logfile_maxbytes=0 stdout_logfile_maxbytes=0 [program:classification_style_processor] -command=bash -c "sleep 11 && nice -n 19 python /srv/photonix/manage.py classification_style_processor" -startsecs=20 +command=bash -c "sleep 12 && nice -n 19 python /srv/photonix/manage.py classification_style_processor" +startsecs=22 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout @@ -120,8 +138,17 @@ stderr_logfile_maxbytes=0 stdout_logfile_maxbytes=0 [program:classification_object_processor] -command=bash -c "sleep 12 && nice -n 19 python /srv/photonix/manage.py classification_object_processor" -startsecs=21 +command=bash -c "sleep 13 && nice -n 19 python /srv/photonix/manage.py classification_object_processor" +startsecs=23 +environment=PYTHONPATH=/srv +stderr_logfile=/dev/stderr +stdout_logfile=/dev/stdout +stderr_logfile_maxbytes=0 +stdout_logfile_maxbytes=0 + +[program:classification_event_processor] +command=bash -c "sleep 14 && nice -n 19 python /srv/photonix/manage.py classification_event_processor" +startsecs=24 environment=PYTHONPATH=/srv stderr_logfile=/dev/stderr stdout_logfile=/dev/stdout diff --git a/tests/factories.py b/tests/factories.py index f6e71d29..03904f75 100644 --- a/tests/factories.py +++ b/tests/factories.py @@ -24,8 +24,13 @@ class Meta: model = Library name = factory.Sequence(lambda n: f'Test Library {n}') + classification_color_enabled = True classification_location_enabled = True + classification_style_enabled = True classification_object_enabled = True + classification_face_enabled = True + setup_stage_completed = True + class LibraryUserFactory(factory.django.DjangoModelFactory): class Meta: @@ -35,6 +40,7 @@ class Meta: user = factory.SubFactory(UserFactory) owner = True + class PhotoFactory(factory.django.DjangoModelFactory): class Meta: model = Photo @@ -75,3 +81,4 @@ class Meta: type = 'classify.style' status = 'P' + library = factory.SubFactory(LibraryFactory) diff --git a/tests/photos/bad_date.jpg b/tests/photos/bad_date.jpg new file mode 100644 index 00000000..035b8b57 Binary files /dev/null and b/tests/photos/bad_date.jpg differ diff --git a/tests/photos/cmyk.tif b/tests/photos/cmyk.tif new file mode 100644 index 00000000..22479c3c Binary files /dev/null and b/tests/photos/cmyk.tif differ diff --git a/tests/photos/faces/Barbara_Becker_0001.jpg b/tests/photos/faces/Barbara_Becker_0001.jpg new file mode 100644 index 00000000..9be18434 Binary files /dev/null and b/tests/photos/faces/Barbara_Becker_0001.jpg differ diff --git a/tests/photos/faces/Boris_Becker_0003.jpg b/tests/photos/faces/Boris_Becker_0003.jpg new file mode 100644 index 00000000..b0325d3e Binary files /dev/null and b/tests/photos/faces/Boris_Becker_0003.jpg differ diff --git a/tests/photos/faces/Boris_Becker_0004.jpg b/tests/photos/faces/Boris_Becker_0004.jpg new file mode 100644 index 00000000..8b96d72f Binary files /dev/null and b/tests/photos/faces/Boris_Becker_0004.jpg differ diff --git a/tests/photos/faces/Boris_Becker_0005.jpg b/tests/photos/faces/Boris_Becker_0005.jpg new file mode 100644 index 00000000..5dca9e64 Binary files /dev/null and b/tests/photos/faces/Boris_Becker_0005.jpg differ diff --git a/tests/photos/faces/David_Beckham_0001.jpg b/tests/photos/faces/David_Beckham_0001.jpg new 
file mode 100644 index 00000000..4ec88911 Binary files /dev/null and b/tests/photos/faces/David_Beckham_0001.jpg differ diff --git a/tests/photos/faces/David_Beckham_0002.jpg b/tests/photos/faces/David_Beckham_0002.jpg new file mode 100644 index 00000000..48e8a80d Binary files /dev/null and b/tests/photos/faces/David_Beckham_0002.jpg differ diff --git a/tests/photos/faces/David_Beckham_0010.jpg b/tests/photos/faces/David_Beckham_0010.jpg new file mode 100644 index 00000000..8b6579f6 Binary files /dev/null and b/tests/photos/faces/David_Beckham_0010.jpg differ diff --git a/tests/photos/invalid_utf8.jpg b/tests/photos/invalid_utf8.jpg new file mode 100644 index 00000000..d7c56e8b Binary files /dev/null and b/tests/photos/invalid_utf8.jpg differ diff --git a/tests/photos/unreadable_date.jpg b/tests/photos/unreadable_date.jpg new file mode 100644 index 00000000..0e74c172 Binary files /dev/null and b/tests/photos/unreadable_date.jpg differ diff --git a/tests/test_classifier_batch.py b/tests/test_classifier_batch.py index 159c4f0d..c18de390 100644 --- a/tests/test_classifier_batch.py +++ b/tests/test_classifier_batch.py @@ -22,7 +22,7 @@ def test_classifier_batch(): photo = PhotoFactory() PhotoFileFactory(photo=photo) - for i in range(4): + for _ in range(4): TaskFactory(subject_id=photo.id) start = time() diff --git a/tests/test_classifier_models.py b/tests/test_classifier_models.py index 76694a8c..378dbb2e 100644 --- a/tests/test_classifier_models.py +++ b/tests/test_classifier_models.py @@ -3,6 +3,9 @@ from datetime import datetime from pathlib import Path +from django.conf import settings +from PIL import Image + def test_downloading(tmpdir): from photonix.classifiers.style.model import StyleModel @@ -43,19 +46,19 @@ def test_location_predict(): assert result['city']['population'] == 7556900 # In the sea near Oia, Santorini, Greece - Country is inferred from city - result = model.predict(location=[36.4396445,25.3560936]) + result = model.predict(location=[36.4396445, 25.3560936]) assert result['country']['name'] == 'Greece' assert result['city']['name'] == 'Oía' assert result['city']['distance'] == 3132 assert result['city']['population'] == 3376 # Too far off the coast of John o' Groats, Scotland, UK - No match - result = model.predict(location=[58.6876742,-3.4206862]) + result = model.predict(location=[58.6876742, -3.4206862]) assert result['country'] == None assert result['city'] == None # Vernier, Switzerland - Tests country code mainly (CH can be China in some codings) - result = model.predict(location=[46.1760906,5.9929043]) + result = model.predict(location=[46.1760906, 5.9929043]) assert result['country']['name'] == 'Switzerland' assert result['country']['code'] == 'CH' assert result['city']['country_name'] == 'Switzerland' @@ -74,7 +77,6 @@ def test_object_predict(): model = ObjectModel() snow = str(Path(__file__).parent / 'photos' / 'snow.jpg') result = model.predict(snow) -# import pdb; pdb.set_trace() assert len(result) == 3 @@ -105,3 +107,75 @@ def test_style_predict(): assert len(result) == 1 assert result[0][0] == 'serene' assert '{0:.3f}'.format(result[0][1]) == '0.962' + + # Check that there is no error when running with non-RGB image + cmyk = str(Path(__file__).parent / 'photos' / 'cmyk.tif') + result = model.predict(cmyk) + assert result == None + + +def test_face_predict(): + from photonix.classifiers.face.model import FaceModel + from photonix.classifiers.face.deepface.commons.distance import findEuclideanDistance + + TRAIN_FACES = [ + 'Boris_Becker_0003.jpg', + 
'Boris_Becker_0004.jpg', + 'David_Beckham_0001.jpg', + 'David_Beckham_0002.jpg', + ] + TEST_FACES = [ + # Test image, nearest match in TRAIN_FACES, distance (3DP) + ('Boris_Becker_0005.jpg', 0, '9.614'), + ('David_Beckham_0010.jpg', 2, '10.956'), + ('Barbara_Becker_0001.jpg', 2, '15.736'), + ] + + embedding_cache = [] + model = FaceModel() + model.library_id = '00000000-0000-0000-0000-000000000000' + + # Calculate embeddings for training faces + for fn in TRAIN_FACES: + path = str(Path(__file__).parent / 'photos' / 'faces' / fn) + image_data = Image.open(path) + embedding = model.get_face_embedding(image_data) + embedding_cache.append(embedding) + + training_data = [(i, embedding) for i, embedding in enumerate(embedding_cache)] + + # Compare test faces using brute force Euclidian calculations + for fn, expected_nearest, expected_distance in TEST_FACES: + path = str(Path(__file__).parent / 'photos' / 'faces' / fn) + image_data = Image.open(path) + embedding = model.get_face_embedding(image_data) + nearest, distance = model.find_closest_face_tag_by_brute_force(embedding, target_data=training_data) + + assert nearest == expected_nearest + assert '{:.3f}'.format(distance) == expected_distance + assert findEuclideanDistance(embedding, embedding_cache[nearest]) == distance + + # Train ANN index + model.retrain_face_similarity_index(training_data=training_data) + + # Compare test faces using ANN trained index + for fn, expected_nearest, expected_distance in TEST_FACES: + path = str(Path(__file__).parent / 'photos' / 'faces' / fn) + image_data = Image.open(path) + embedding = model.get_face_embedding(image_data) + nearest, distance = model.find_closest_face_tag_by_ann(embedding) + + assert nearest == expected_nearest + assert '{:.3f}'.format(distance) == expected_distance + assert abs(findEuclideanDistance(embedding, embedding_cache[nearest]) - distance) < 0.000001 + + # Tidy up ANN model training + for fn in [ + f'faces_{model.library_id}.ann', + f'faces_tag_ids_{model.library_id}.json', + f'retrained_version_{model.library_id}.txt', + ]: + try: + os.remove(Path(settings.MODEL_DIR) / 'face' / fn) + except: + pass diff --git a/tests/test_graphql.py b/tests/test_graphql.py index f2539dcd..b8ee12f6 100644 --- a/tests/test_graphql.py +++ b/tests/test_graphql.py @@ -193,6 +193,7 @@ def test_library_setting_data(self): classificationStyleEnabled classificationObjectEnabled classificationLocationEnabled + classificationFaceEnabled } sourceFolder } @@ -202,14 +203,15 @@ def test_library_setting_data(self): data = get_graphql_content(response) assert response.status_code == 200 self.assertEqual(data['data']['librarySetting']['library']['name'], self.defaults['library'].name) - self.assertFalse(data['data']['librarySetting']['library']['classificationColorEnabled']) - self.assertFalse(data['data']['librarySetting']['library']['classificationStyleEnabled']) - assert data['data']['librarySetting']['library']['classificationObjectEnabled'] - assert data['data']['librarySetting']['library']['classificationLocationEnabled'] + self.assertTrue(data['data']['librarySetting']['library']['classificationColorEnabled']) + self.assertTrue(data['data']['librarySetting']['library']['classificationStyleEnabled']) + self.assertTrue(data['data']['librarySetting']['library']['classificationObjectEnabled']) + self.assertTrue(data['data']['librarySetting']['library']['classificationLocationEnabled']) + self.assertTrue(data['data']['librarySetting']['library']['classificationFaceEnabled']) 
self.assertEqual(data['data']['librarySetting']['sourceFolder'], self.defaults['library'].paths.all()[0].path) - def test_library_update_style_enabled_mutaion(self): - """Test library updateStyleEnabled mutaion response.""" + def test_library_update_style_enabled_mutation(self): + """Test library updateStyleEnabled mutation response.""" mutation = """ mutation updateStyleEnabled( $classificationStyleEnabled: Boolean! @@ -230,8 +232,8 @@ def test_library_update_style_enabled_mutaion(self): assert response.status_code == 200 assert tuple(tuple(data.values())[0].values())[0].get('classificationStyleEnabled') - def test_library_update_color_enabled_mutaion(self): - """Test library updateColorEnabled mutaion response.""" + def test_library_update_color_enabled_mutation(self): + """Test library updateColorEnabled mutation response.""" mutation = """ mutation updateColorEnabled( $classificationColorEnabled: Boolean! @@ -252,8 +254,8 @@ def test_library_update_color_enabled_mutaion(self): assert response.status_code == 200 assert tuple(tuple(data.values())[0].values())[0].get('classificationColorEnabled') - def test_library_update_location_enabled_mutaion(self): - """Test library updateLocationEnabled mutaion response.""" + def test_library_update_location_enabled_mutation(self): + """Test library updateLocationEnabled mutation response.""" mutation = """ mutation updateLocationEnabled( $classificationLocationEnabled: Boolean! @@ -274,8 +276,8 @@ def test_library_update_location_enabled_mutaion(self): assert response.status_code == 200 self.assertFalse(tuple(tuple(data.values())[0].values())[0].get('classificationLocationEnabled')) - def test_library_update_object_enabled_mutaion(self): - """Test library updateObjectEnabled mutaion response.""" + def test_library_update_object_enabled_mutation(self): + """Test library updateObjectEnabled mutation response.""" mutation = """ mutation updateObjectEnabled( $classificationObjectEnabled: Boolean! 
@@ -296,8 +298,8 @@ def test_library_update_object_enabled_mutaion(self): assert response.status_code == 200 self.assertFalse(tuple(tuple(data.values())[0].values())[0].get('classificationObjectEnabled')) - def test_library_update_source_folder_mutaion(self): - """Test library updateSourceFolder mutaion response.""" + def test_library_update_source_folder_mutation(self): + """Test library updateSourceFolder mutation response.""" mutation = """ mutation updateSourceFolder($sourceFolder: String!, $libraryId: ID) { updateSourceFolder( @@ -313,7 +315,7 @@ def test_library_update_source_folder_mutaion(self): self.assertEqual(tuple(tuple(data.values())[0].values())[0].get('sourceFolder'),self.defaults['library'].paths.all()[0].path) def test_change_password_mutation(self): - """Test change password mutaion response.""" + """Test change password mutation response.""" mutation = """ mutation changePassword ( $oldPassword: String!, @@ -688,6 +690,10 @@ def test_response_of_get_filters_api(self): id name } + allEventTags(libraryId: $libraryId, multiFilter: $multiFilter) { + id + name + } allCameras(libraryId: $libraryId) { id make @@ -827,8 +833,12 @@ def test_onboarding_steps(self): assert User.objects.first().has_configured_importing self.assertFalse(User.objects.first().has_configured_image_analysis) mutation = """ - mutation ($classificationColorEnabled: Boolean!,$classificationStyleEnabled: Boolean!, - $classificationObjectEnabled: Boolean!,$classificationLocationEnabled: Boolean!, + mutation ( + $classificationColorEnabled: Boolean!, + $classificationStyleEnabled: Boolean!, + $classificationObjectEnabled: Boolean!, + $classificationLocationEnabled: Boolean!, + $classificationFaceEnabled: Boolean!, $userId: ID!,$libraryId: ID!, ) { imageAnalysis(input:{ @@ -836,6 +846,7 @@ def test_onboarding_steps(self): classificationStyleEnabled:$classificationStyleEnabled, classificationObjectEnabled:$classificationObjectEnabled, classificationLocationEnabled:$classificationLocationEnabled, + classificationFaceEnabled:$classificationFaceEnabled, userId:$userId, libraryId:$libraryId, }) { @@ -847,8 +858,11 @@ def test_onboarding_steps(self): library_id = data['data']['PhotoImporting']['libraryId'] response = self.api_client.post_graphql( mutation, { - 'classificationColorEnabled': True, 'classificationStyleEnabled': True, - 'classificationObjectEnabled': False, 'classificationLocationEnabled': False, + 'classificationColorEnabled': True, + 'classificationStyleEnabled': True, + 'classificationObjectEnabled': False, + 'classificationLocationEnabled': False, + 'classificationFaceEnabled': False, 'userId': data['data']['PhotoImporting']['userId'], 'libraryId': data['data']['PhotoImporting']['libraryId'], }) diff --git a/tests/test_metadata.py b/tests/test_metadata.py index a386b504..5c124f9c 100644 --- a/tests/test_metadata.py +++ b/tests/test_metadata.py @@ -4,7 +4,7 @@ def test_metadata(): - # General exif metadata + # General EXIF metadata photo_path = str(Path(__file__).parent / 'photos' / 'snow.jpg') metadata = PhotoMetadata(photo_path) assert metadata.get('Image Size') == '800x600' @@ -12,6 +12,12 @@ def test_metadata(): assert metadata.get('Make') == 'Xiaomi' assert metadata.get('ISO') == '100' + # Ignore invalid UTF-8 that might be in the metadata + photo_path = str(Path(__file__).parent / 'photos' / 'invalid_utf8.jpg') + metadata = PhotoMetadata(photo_path) + assert len(metadata.get_all().keys()) > 30 + assert metadata.get('Artist') == '' + def test_location(): # Conversion from GPS exif data to 
latitude/longitude @@ -36,3 +42,14 @@ def test_datetime(): photo_path = str(Path(__file__).parent / 'photos' / 'snow-20100603.jpg') parsed_datetime = get_datetime(photo_path) assert parsed_datetime.isoformat() == '2010-06-03T00:00:00' + + # Date is parseable but has slashes instead of colons + photo_path = str(Path(__file__).parent / 'photos' / 'bad_date.jpg') + parsed_datetime = get_datetime(photo_path) + assert parsed_datetime.year == 2000 + assert parsed_datetime.isoformat() == '2000-01-01T00:00:00+00:00' + + # Some of the date digits are the letter X so fall back to file creation date + photo_path = str(Path(__file__).parent / 'photos' / 'unreadable_date.jpg') + parsed_datetime = get_datetime(photo_path) + assert parsed_datetime.isoformat() == '2021-08-08T21:11:25.231271+00:00' diff --git a/tests/test_task_queue.py b/tests/test_task_queue.py index 702dd472..4f900f8f 100644 --- a/tests/test_task_queue.py +++ b/tests/test_task_queue.py @@ -74,7 +74,7 @@ def test_tasks_created_updated(photo_fixture_snow): process_classify_images_tasks() task = Task.objects.get(type='classify_images', subject_id=photo_fixture_snow.id) assert task.status == 'S' - assert task.children.count() == 4 + assert task.children.count() == 6 assert task.complete_with_children == True # Completing all the child processes should set the parent task to completed diff --git a/tests/test_thumbnails.py b/tests/test_thumbnails.py index 7f798d0f..9cc85725 100644 --- a/tests/test_thumbnails.py +++ b/tests/test_thumbnails.py @@ -59,7 +59,7 @@ def test_view(photo_fixture_snow): # Now we should get the actual thumbnail image file assert response.status_code == 200 assert response.content[:10] == b'\xff\xd8\xff\xe0\x00\x10JFIF' - assert response._headers['content-type'][1] == 'image/jpeg' + assert response.headers['Content-Type'] == 'image/jpeg' response_length = len(response.content) assert response_length > 5929 * 0.8 assert response_length < 5929 * 1.2 diff --git a/ui/src/components/BoundingBoxes.js b/ui/src/components/BoundingBoxes.js index 96786018..76eb9f33 100644 --- a/ui/src/components/BoundingBoxes.js +++ b/ui/src/components/BoundingBoxes.js @@ -1,5 +1,12 @@ -import React from 'react' +import React, { useEffect, useRef } from 'react' import styled from '@emotion/styled' +import { useMutation } from '@apollo/client' +import { useDispatch, useSelector } from 'react-redux' +import { ReactComponent as EditIcon } from '../static/images/edit.svg' +import { ReactComponent as BlockIcon } from '../static/images/block_black.svg' +import { ReactComponent as DoneIcon } from '../static/images/done_black.svg' +import { EDIT_FACE_TAG, BLOCK_FACE_TAG, VERIFY_FACE_TAG } from '../graphql/tag' +import { isTagUpdated } from '../stores/tag/selector' const Container = styled('div')` width: 100%; @@ -9,6 +16,8 @@ const Container = styled('div')` .FeatureBox { border: 3px solid rgba(255, 0, 0, 0.75); position: absolute; + border-radius: 6px; + overflow: hidden; .FeatureLabel { color: #fff; font-size: 14px; @@ -16,10 +25,67 @@ const Container = styled('div')` display: inline-block; overflow: hidden; max-width: 100%; - padding: 0 5px 2px 2px; + padding: 1px 7px 2px 4px; float: left; text-align: left; white-space: nowrap; + pointer-events: all; + } + &.face { + cursor: default; + overflow: visible; + &.yellowBox { + border-color: rgba(255, 255, 0, 0.75); + .FeatureLabel { + color: #000; + background-color: rgba(255, 255, 0, 0.75); + } + } + &.greenBox { + border-color: rgba(0, 255, 0, 0.75); + .FeatureLabel { + color: #000; + background-color: 
rgba(0, 255, 0, 0.75); + } + } + &.whiteBox { + border-color: rgba(202, 202, 191, 0.5); + .FeatureLabel { + color: #000; + background-color: rgba(202, 202, 191, 0.5); + } + } + .FeatureEditText { + color: #000 !important; + width: 100%; + border: 0; + padding: 2px 4px; + } + .icons { + position: absolute; + bottom: -2px; + right: 2px; + width: max-content; + + svg { + background: #fff; + border-radius: 50%; + padding: 3px; + margin: 0 1px; + cursor: pointer; + &.FeatureIconEdit { + } + &.FeatureIconDelete { + background: #f00; + } + &.FeatureIconDone { + background: #0f0; + } + } + } + } + &.hideBox { + border: none; } } @@ -33,24 +99,171 @@ const Container = styled('div')` } } ` +const ENTER_KEY = 13 +const ESCAPE_KEY = 27 + +const BoundingBoxes = ({ + boxes, + className, + refetch, + showBoundingBox, + editLableId, + setEditLableId, +}) => { + const dispatch = useDispatch() + const ref = useRef(null) + const [editFaceTag] = useMutation(EDIT_FACE_TAG) + const [blockFaceTag] = useMutation(BLOCK_FACE_TAG) + const [verifyPhoto] = useMutation(VERIFY_FACE_TAG) + const tagUpdated = useSelector(isTagUpdated) + + const onHandleBlock = (event, photoTagId) => { + stopParentEventBehavior(event) + blockFaceTag({ + variables: { + photoTagId: photoTagId, + }, + }) + .then((res) => { + if (res.data.blockFaceTag.ok) { + refetch() + dispatch({ + type: 'IS_TAG_UPDATE', + payload: { updated: !tagUpdated }, + }) + } + }) + .catch((e) => {}) + } + + const onSaveLable = (event, photoTagId) => { + stopParentEventBehavior(event) + editFaceTag({ + variables: { + photoTagId: photoTagId, + newName: ref.current.value, + }, + }) + .then((res) => { + setEditLableId('') + if (res.data.editFaceTag.ok) { + refetch() + dispatch({ + type: 'IS_TAG_UPDATE', + payload: { updated: !tagUpdated }, + }) + } + }) + .catch((e) => { + setEditLableId('') + }) + } + + const onChangeLable = (event, photoTagId) => { + (event.keyCode === ENTER_KEY && + ref.current.value && + onSaveLable(event, photoTagId)) || + (event.keyCode === ESCAPE_KEY && setEditLableId('')) + } + + const setVerifyPhoto = (event, photoTagId) => { + stopParentEventBehavior(event) + verifyPhoto({ + variables: { + photoTagId: photoTagId, + }, + }) + .then((res) => { + if (res.data.verifyPhoto.ok) refetch() + }) + .catch((e) => {}) + } + + useEffect(() => { + if (ref?.current) { + ref.current.focus() + } + }, [editLableId]) + + const updateEditState = (event, boxId) => { + stopParentEventBehavior(event) + setEditLableId(boxId) + } + + const stopParentEventBehavior = (event) => { + event.stopPropagation() + } -const BoundingBoxes = ({ boxes }) => { return ( - {boxes.map((box, index) => { + {boxes?.map((box, index) => { let left = (box.positionX - box.sizeX / 2) * 100 + '%' let top = (box.positionY - box.sizeY / 2) * 100 + '%' let width = box.sizeX * 100 + '%' let height = box.sizeY * 100 + '%' + // console.log(box) return (
-
- {box.name} -
+ {showBoundingBox && + (editLableId === box.id ? ( + onChangeLable(e, box.id)} + ref={ref} + onMouseDown={(e) => stopParentEventBehavior(e)} + onClick={(e) => stopParentEventBehavior(e)} + /> + ) : ( + !box.deleted && ( +
+ {box.name} +
+ ) + ))} + {className === 'face' && ( +
+ {editLableId === box.id ? ( + onSaveLable(e, box.id)} + /> + ) : ( + <> + {!box.verified && !box.deleted && ( + onHandleBlock(e, box.id)} + title="Reject automatic face tag" + /> + )} + {box.showVerifyIcon && !box.deleted && ( + setVerifyPhoto(e, box.id)} + title="Approve automatic face tag" + /> + )} + updateEditState(e, box.id)} + title="Edit person’s name" + /> + + )} +
+ )}
) })} diff --git a/ui/src/components/Browse.js b/ui/src/components/Browse.js index 4e814687..b1de1806 100644 --- a/ui/src/components/Browse.js +++ b/ui/src/components/Browse.js @@ -117,7 +117,6 @@ const Browse = ({ 'searchExpanded', window.innerHeight > 850 ? true : false ) - let content = mode === 'MAP' ? ( @@ -142,6 +141,7 @@ const Browse = ({ onFilterToggle={onFilterToggle} onClearFilters={onClearFilters} updateSearchText={updateSearchText} + searchAreaExpand={expanded} />
{ +const Filters = ({ data, selectedFilters, onToggle, searchAreaExpand }) => { const [values, setValues] = useState({ 'ISO Speed': [], 'Focal Length': [], @@ -31,7 +31,6 @@ const Filters = ({ data, selectedFilters, onToggle }) => { Rating: [], }) const [isDomainAvail, setIsDomainAvail] = useState(false) - useEffect(() => { const vals = [] data.map((group) => { @@ -109,7 +108,13 @@ const Filters = ({ data, selectedFilters, onToggle }) => { return ( {isDomainAvail && ( -
+
{data.map((group) => { let items = '' let filterGroupExtraStyles = {} @@ -222,10 +227,7 @@ const Filters = ({ data, selectedFilters, onToggle }) => { return ( {showTagSection(items, group.name) && ( -
+

{group.name}

{items}
diff --git a/ui/src/components/Header.js index 22990226..688ac183 100644 --- a/ui/src/components/Header.js +++ b/ui/src/components/Header.js @@ -1,12 +1,12 @@ -import React from 'react' +import React, { useRef, useState, useEffect } from 'react' import { useSelector } from 'react-redux' import styled from '@emotion/styled' import User from './User' +import Notification from './Notification' import { getIsMobileApp, getSafeArea } from '../stores/layout/selector' import logo from '../static/images/logo.svg' import menuIcon from '../static/images/menu.svg' -// import notifications from '../static/images/notifications.svg' const Container = styled('div')` height: 50px; @@ -45,15 +45,39 @@ const Container = styled('div')` .navigation { flex-grow: 1; } - .notifications { - width: 50px; - } ` +export const useComponentVisible = (initialIsVisible, type) => { + const [isComponentVisible, setIsComponentVisible] = useState(initialIsVisible) + const ref = useRef(null) + + const handleHideDropdown = (event: KeyboardEvent) => { + if (event.key === 'Escape') { + setIsComponentVisible(false) + } + } + + const handleClickOutside = (event) => { + if (ref.current && !ref.current.contains(event.target)) { + setIsComponentVisible(false) + } + } + useEffect(() => { + document.addEventListener('keydown', handleHideDropdown, false) + document.addEventListener('click', handleClickOutside, false) + return () => { + document.removeEventListener('keydown', handleHideDropdown, false) + document.removeEventListener('click', handleClickOutside, false) + } + }) + + return { ref, isComponentVisible, setIsComponentVisible } +} const Header = ({ profile, libraries }) => { const isMobileApp = useSelector(getIsMobileApp) const safeArea = useSelector(getSafeArea) - + const [showNotification, setShowNotification] = useState(false) + const [showUserMenu, setShowUserMenu] = useState(false) return ( { Photonix
- {/*
- Notifications -
*/} - + + ) } diff --git a/ui/src/components/Init.js b/ui/src/components/Init.js index bdea4552..a0d2e46a 100644 --- a/ui/src/components/Init.js +++ b/ui/src/components/Init.js @@ -1,8 +1,15 @@ import React from 'react' import { Provider } from 'react-redux' import { createStore } from 'redux' -import { ApolloClient, ApolloLink, ApolloProvider, from, HttpLink, InMemoryCache } from '@apollo/client' -import { RetryLink } from "@apollo/client/link/retry"; +import { + ApolloClient, + ApolloLink, + ApolloProvider, + from, + HttpLink, + InMemoryCache, +} from '@apollo/client' +import { RetryLink } from '@apollo/client/link/retry' import { Router } from 'react-router-dom' import { ModalContainer } from 'react-router-modal' // import { ThemeProvider, CSSReset } from '@chakra-ui/core' @@ -11,6 +18,7 @@ import { ThemeProvider, ColorModeProvider } from '@chakra-ui/core' import history from '../history' import reducers from './../stores' import customTheme from '../theme' +import { logOut } from '../auth' export const store = createStore( reducers, @@ -22,26 +30,39 @@ window.photonix = { } const additiveLink = from([ - new RetryLink(), + new RetryLink({ + delay: { + initial: 500, + max: Infinity, + jitter: true, + }, + attempts: { + max: 30, + }, + }), new ApolloLink((operation, forward) => { return forward(operation).map((data) => { // Raise GraphQL errors as exceptions that trigger RetryLink when re-authentication is in progress if (data && data.errors && data.errors.length > 0) { - throw new Error('GraphQL Operational Error'); + if (data.errors[0].message === 'Error decoding signature') { + // Probably the Django SECRET_KEY changed so the user needs to re-authenticate. + logOut() + } + throw new Error('GraphQL Operational Error') } - return data; - }); + return data + }) }), new HttpLink({ uri: '/graphql', credentials: 'same-origin', // Required for older versions of Chromium (~v58) - }) -]); + }), +]) const client = new ApolloClient({ cache: new InMemoryCache(), - link: additiveLink -}); + link: additiveLink, +}) const Init = ({ children }) => { const isMobileApp = navigator.userAgent.indexOf('PhotonixMobileApp') > -1 diff --git a/ui/src/components/Login.js b/ui/src/components/Login.js index df58d157..a3af054f 100644 --- a/ui/src/components/Login.js +++ b/ui/src/components/Login.js @@ -52,6 +52,7 @@ const ENVIRONMENT = gql` { environment { demo + sampleData firstRun form userId @@ -134,7 +135,7 @@ const Login = (props) => { ref={(node) => { inputUsername = node }} - defaultValue={envData && envData.environment.demo ? 'demo' : ''} + defaultValue={envData && (envData.environment.demo || envData.environment.sampleData) ? 'demo' : ''} /> @@ -144,7 +145,7 @@ const Login = (props) => { ref={(node) => { inputPassword = node }} - defaultValue={envData && envData.environment.demo ? 'demo' : ''} + defaultValue={envData && (envData.environment.demo || envData.environment.sampleData) ? 'demo' : ''} />
@@ -60,7 +99,7 @@ const MapView = ({ return (
- + {tileLayer} {markers} @@ -73,13 +112,11 @@ MapView.propTypes = { photos: PropTypes.array, bounds: PropTypes.func, location: PropTypes.array, - zoom: PropTypes.number, maxZoom: PropTypes.number, hideAttribution: PropTypes.bool, } MapView.defaultProps = { - zoom: 2, maxZoom: 15, } diff --git a/ui/src/components/ModalForm.js b/ui/src/components/ModalForm.js index e6f00e30..fec13b50 100644 --- a/ui/src/components/ModalForm.js +++ b/ui/src/components/ModalForm.js @@ -50,6 +50,7 @@ const ModalForm = ({ const [stepFiveRegistration] = useMutation(STEP_FIVE) const onSubmit = (data) => { if (nextStep === '/onboarding/step2') { + localStorage.setItem("isSignin", false); stepOneRegistration({ variables: { username: data.username, @@ -134,6 +135,7 @@ const ModalForm = ({ classificationStyleEnabled: data.classificationStyleEnabled, classificationObjectEnabled: data.classificationObjectEnabled, classificationLocationEnabled: data.classificationLocationEnabled, + classificationFaceEnabled: data.classificationFaceEnabled, userId: envData.environment.userId, libraryId: LibraryId ? LibraryId : envData.environment.libraryId, }, diff --git a/ui/src/components/Notification.js b/ui/src/components/Notification.js new file mode 100644 index 00000000..e20841f6 --- /dev/null +++ b/ui/src/components/Notification.js @@ -0,0 +1,321 @@ +import React, { useState, useEffect } from 'react' +import styled from '@emotion/styled' +import { Progress, Box, Flex } from '@chakra-ui/core' +import { useQuery, useMutation } from '@apollo/client' +import { useSelector } from 'react-redux' + +import notifications from '../static/images/notifications.svg' +import play from '../static/images/play.svg' +import pause from '../static/images/pause.svg' +import { GET_TASK_PROGRESS } from '../graphql/settings' +import { getActiveLibrary } from '../stores/libraries/selector' +import { useComponentVisible } from './Header' +import { useSettings } from './Settings' +import { + SETTINGS_STYLE, + SETTINGS_COLOR, + SETTINGS_LOCATION, + SETTINGS_OBJECT, + SETTINGS_FACE, + GET_SETTINGS, +} from '../graphql/settings' + +const Container = styled('div')` + margin-right: 10px; + > img { + filter: invert(0.9); + padding: 10px 0 10px 10px; + width: 50px; + height: 50px; + cursor: pointer; + } + .notificationMenu { + position: absolute; + width: 400px; + right: 0px; + top: 50px; + z-index: 10; + background: #484848; + margin: 0; + list-style: none; + padding: 0; + box-shadow: -3px 8px 17px rgba(0, 0, 0, 0.15); + } + .isMobileApp header .notificationMenu { + top: 80px; + } + .notificationMenu li { + padding: 12px 15px 12px 15px; + cursor: default; + font-size: 16px; + } + .notificationMenu li:hover { + background: rgba(255, 255, 255, 0.1); + } + .notificationMenu li img { + padding: 0; + width: 35px; + height: 35px; + vertical-align: -6px; + margin-right: 10px; + filter: invert(0.9); + cursor: pointer; + } + @media (max-width: 767px) { + .notificationMenu { + width: 290px; + } + .notificationMenu li { + font-size: 13px; + } + } +` +const Notification = (props) => { + const activeLibrary = useSelector(getActiveLibrary) + const [settings, setSettings] = useSettings(activeLibrary) + const [showNotificationIcon, setShowNotificationIcon] = useState(false) + const { ref, isComponentVisible, setIsComponentVisible } = + useComponentVisible(false) + const { showNotification, setShowNotification, setShowUserMenu } = props + + const handleShowMenu = () => { + if (!showNotification) { + setIsComponentVisible(true) + setShowNotification(true) + setShowUserMenu(false) 
+ settingsRefetch() + } + } + + const { data, refetch } = useQuery(GET_TASK_PROGRESS) + const { refetch: settingsRefetch } = useQuery(GET_SETTINGS, { + variables: { libraryId: activeLibrary?.id }, + }) + const [settingUpdateStyle] = useMutation(SETTINGS_STYLE) + const [settingUpdateColor] = useMutation(SETTINGS_COLOR) + const [settingUpdateLocation] = useMutation(SETTINGS_LOCATION) + const [settingUpdateObject] = useMutation(SETTINGS_OBJECT) + const [settingUpdateFace] = useMutation(SETTINGS_FACE) + + useEffect(() => { + const interval = isComponentVisible ? 3000 : 15000 + let handle = setInterval(refetch, interval) + return () => { + clearInterval(handle) + } + }) + + useEffect(() => { + if (!isComponentVisible) setShowNotification(false) + }, [isComponentVisible, setShowNotification]) + + useEffect(() => { + if (data) { + getKeys(data).map((key) => { + let remaining = data.taskProgress[key]?.remaining + if (remaining === 0) { + window.sessionStorage.setItem(key, 0) + } else if (remaining > window.sessionStorage.getItem(key)) { + window.sessionStorage.setItem(key, remaining) + !showNotificationIcon && setShowNotificationIcon(true) + } + return key + }) + } + }, [data, showNotificationIcon]) + + const getTitle = (key) => { + switch (key) { + case 'generateThumbnails': + return 'Generating thumbnails' + case 'processRaw': + return 'Processing raw files' + case 'classifyColor': + return 'Analyzing colors' + case 'classifyObject': + return 'Analyzing objects' + case 'classifyLocation': + return 'Analyzing locations' + case 'classifyStyle': + return 'Analyzing styles' + case 'classifyFace': + return 'Analyzing faces' + default: + return '' + } + } + + const getKeys = (data) => { + let keys = Object.keys(data.taskProgress) + return keys + } + + const getNotificationKeys = (data) => { + const keys = getKeys(data) + const remaining = keys.filter((k) => data.taskProgress[k].remaining > 0) + if (remaining.length) { + !showNotificationIcon && setShowNotificationIcon(true) + } else { + showNotificationIcon && setShowNotificationIcon(false) + isComponentVisible && setIsComponentVisible(false) + } + return remaining + } + + const getProgressPercent = (key) => { + return ( + ((window.sessionStorage.getItem(key) - + data.taskProgress[key]?.remaining) / + window.sessionStorage.getItem(key)) * + 100 + ) + } + + const getSettingsKey = (key) => { + switch (key) { + case 'classifyObject': + return 'classificationObjectEnabled' + case 'classifyColor': + return 'classificationColorEnabled' + case 'classifyLocation': + return 'classificationLocationEnabled' + case 'classifyStyle': + return 'classificationStyleEnabled' + case 'classifyFace': + return 'classificationFaceEnabled' + default: + return null + } + } + + const toggleBooleanSetting = (key) => { + let newSettings = { ...settings } + newSettings[getSettingsKey(key)] = !settings[getSettingsKey(key)] + setSettings(newSettings) + switch (getSettingsKey(key)) { + case 'classificationStyleEnabled': + settingUpdateStyle({ + variables: { + classificationStyleEnabled: newSettings.classificationStyleEnabled, + libraryId: activeLibrary?.id, + }, + }).catch((e) => {}) + return key + case 'classificationLocationEnabled': + settingUpdateLocation({ + variables: { + classificationLocationEnabled: + newSettings.classificationLocationEnabled, + libraryId: activeLibrary?.id, + }, + }).catch((e) => {}) + return key + case 'classificationObjectEnabled': + settingUpdateObject({ + variables: { + classificationObjectEnabled: + newSettings.classificationObjectEnabled, + 
libraryId: activeLibrary?.id, + }, + }).catch((e) => {}) + return key + case 'classificationColorEnabled': + settingUpdateColor({ + variables: { + classificationColorEnabled: newSettings.classificationColorEnabled, + libraryId: activeLibrary?.id, + }, + }).catch((e) => {}) + return key + case 'classificationFaceEnabled': + settingUpdateFace({ + variables: { + classificationFaceEnabled: newSettings.classificationFaceEnabled, + libraryId: activeLibrary?.id, + }, + }).catch((e) => {}) + return key + default: + return null + } + } + + const getRemaining = (remaining, totalRunning) => { + return remaining === '0' + ? '1' + : Math.abs(parseInt(remaining) - parseInt(totalRunning)) + } + + const getTotalRunning = (remaining, totalRunning) => { + return totalRunning === '0' ? remaining : totalRunning + } + + return ( + <> + {showNotificationIcon ? ( + + Notification +
    + {data + ? getNotificationKeys(data).map((key, index) => ( +
  • + + + + {getTitle(key)} + + {getRemaining( + data.taskProgress[key]?.remaining, + window.sessionStorage.getItem(key) + )} + / + {getTotalRunning( + data.taskProgress[key]?.remaining, + window.sessionStorage.getItem(key) + )} + + + + + + {key !== 'generateThumbnails' && + key !== 'processRaw' ? ( + settings[getSettingsKey(key)] ? ( + toggleBooleanSetting(key)} + alt="pause" + /> + ) : ( + toggleBooleanSetting(key)} + alt="play" + /> + ) + ) : null} + + +
  • + )) + : null} +
+
+ ) : null} + + ) +} + +export default Notification diff --git a/ui/src/components/PhotoDetail.js b/ui/src/components/PhotoDetail.js index e769190e..abdea458 100644 --- a/ui/src/components/PhotoDetail.js +++ b/ui/src/components/PhotoDetail.js @@ -12,6 +12,7 @@ import PhotoMetadata from './PhotoMetadata' import { getSafeArea } from '../stores/layout/selector' import { getPrevNextPhotos } from '../stores/photos/selector' +import { ReactComponent as DownloadIcon } from '../static/images/download_arrow.svg' import { ReactComponent as ArrowBackIcon } from '../static/images/arrow_back.svg' import { ReactComponent as ArrowLeftIcon } from '../static/images/arrow_left.svg' import { ReactComponent as ArrowRightIcon } from '../static/images/arrow_right.svg' @@ -117,6 +118,14 @@ const Container = styled('div')` cursor: pointer; z-index: 10; } + .showDownloadIcon { + position: absolute; + right: 50px; + top: 10px; + filter: invert(0.9); + cursor: pointer; + z-index: 10; + } /* When two boxes can no longer fit next to each other */ @media all and (max-width: 500px) { @@ -146,13 +155,13 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { ) const [fetchNextPrevious, setFetchNextPrevious] = useState(false) - const [firstPrevious, setFirstPrevious] = useLocalStorageState( 'firstPrevious', 0 ) const timelinePhotoIds = useSelector(photos) + const [showTopIcons, setShowTopIcons] = useState(true) // TODO: Bring this back so it doesn't get triggered by someone adding a tag with 'i' in it // useEffect(() => { @@ -301,39 +310,69 @@ const PhotoDetail = ({ photoId, photo, refetch, updatePhotoFile }) => { } }, [photoId, prevNextPhotos, prevPhoto, nextPhoto]) - let boxes = photo?.objectTags.map((objectTag) => { - return { - name: objectTag.tag.name, - positionX: objectTag.positionX, - positionY: objectTag.positionY, - sizeX: objectTag.sizeX, - sizeY: objectTag.sizeY, - } - }) + const setBoxColorClass = (tag) => { + return tag.deleted ? 'whiteBox' : tag.verified ? 'greenBox' : 'yellowBox' + } + + let boxes = { + object: photo?.objectTags.map((objectTag) => { + return { + name: objectTag.tag.name, + positionX: objectTag.positionX, + positionY: objectTag.positionY, + sizeX: objectTag.sizeX, + sizeY: objectTag.sizeY, + } + }), + face: photo?.personTags.map((tag) => { + return { + id: tag.id, + name: tag.tag.name, + positionX: tag.positionX, + positionY: tag.positionY, + sizeX: tag.sizeX, + sizeY: tag.sizeY, + verified: tag.verified, + deleted: tag.deleted, + boxColorClass: setBoxColorClass(tag), + showVerifyIcon: tag.showVerifyIcon, + } + }), + } + return ( -
- { - if (document.referrer != '') { - history.go(-1) - } else { - history.push('/') - } - }} - /> -
+ {showTopIcons && ( +
+ { + if (document.referrer !== '') { + history.goBack() + } else { + history.push('/') + } + }} + /> +
+ )}
{ updatePhotoFile={updatePhotoFile} /> )} - {!showMetadata ? ( - setShowMetadata(!showMetadata)} - style={{ marginTop: safeArea.top }} - // title="Press [I] key to show/hide photo details" - /> - ) : ( - setShowMetadata(!showMetadata)} - style={{ marginTop: safeArea.top }} - // title="Press [I] key to show/hide photo details" - /> + + {showTopIcons && + (!showMetadata ? ( + setShowMetadata(!showMetadata)} + style={{ marginTop: safeArea.top }} + // title="Press [I] key to show/hide photo details" + /> + ) : ( + setShowMetadata(!showMetadata)} + style={{ marginTop: safeArea.top }} + // title="Press [I] key to show/hide photo details" + /> + ))} + {showTopIcons && photo?.downloadUrl && ( + + + )} ) diff --git a/ui/src/components/PhotoMetadata.js b/ui/src/components/PhotoMetadata.js index c549011f..4b2809a6 100644 --- a/ui/src/components/PhotoMetadata.js +++ b/ui/src/components/PhotoMetadata.js @@ -191,6 +191,9 @@ const PhotoMetadata = ({ return arr[arr.length - 1] } + // To show people tag list without having any blocked tag. + const personTagsList = photo.personTags.filter(personTags => !personTags.deleted); + return (
@@ -275,7 +278,7 @@ const PhotoMetadata = ({ )}
- {photo.locationTags.length ? ( + {photo.locationTags.length > 0 && (

Locations

- ) : ( - '' )} - {photo.location ? ( + {photo.location && (

Map

{}
- ) : ( - '' )} - {photo.colorTags.length ? ( + {photo.colorTags.length > 0 && (

Colors

- ) : ( - '' )} - {photo.objectTags.length ? ( + {personTagsList.length > 0 && ( +
+

+ People + {showBoundingBox ? ( + setShowBoundingBox(false)} /> + ) : ( + setShowBoundingBox(true)} /> + )} +

+
    + {personTagsList.map((photoTag, index) => ( +
  • {photoTag.tag.name}
  • + ))} +
+
+ )} + {photo.objectTags.length > 0 && (

Objects @@ -328,10 +342,8 @@ const PhotoMetadata = ({ ))}

- ) : ( - '' )} - {photo.styleTags.length ? ( + {photo.styleTags.length > 0 && (

Styles

    @@ -340,6 +352,16 @@ const PhotoMetadata = ({ ))}
+ )} + {photo.eventTags.length ? ( +
+

Events

+
    + {photo.eventTags.map((photoTag, index) => ( +
  • {photoTag.tag.name}
  • + ))} +
+
) : ( '' )} diff --git a/ui/src/components/ScrollArea.js b/ui/src/components/ScrollArea.js index 3b8cdeb6..318b6ebe 100644 --- a/ui/src/components/ScrollArea.js +++ b/ui/src/components/ScrollArea.js @@ -1,6 +1,5 @@ import React from 'react' - export default class ScrollArea extends React.Component { constructor(props) { super(props) @@ -40,12 +39,17 @@ export default class ScrollArea extends React.Component { componentDidUpdate = () => { this.init() - if (!this.initialised && this.containerRef.current && this.scrollbarHandleRef.current) { + if ( + !this.initialised && + this.containerRef.current && + this.scrollbarHandleRef.current + ) { this.forceUpdate(this.init()) - } - else if (!this.initialised) { + } else if (!this.initialised) { // Occasionally we get refs before the painting has completed so we have to force an update - setTimeout(() => {this.forceUpdate()}, 100) + setTimeout(() => { + this.forceUpdate() + }, 100) } } @@ -63,11 +67,16 @@ export default class ScrollArea extends React.Component { } if (this.containerRef.current) { - this.contentWidth = this.containerRef.current.firstChild.clientWidth + this.padding - this.contentViewWidth = this.containerRef.current.clientWidth + (2 * this.padding) - this.contentScrollRange = this.contentWidth - this.contentViewWidth + (2 * this.padding) - this.scrollbarWidth = this.containerRef.current.parentElement.clientWidth - (2 * this.padding) - this.scrollbarScrollRange = this.scrollbarWidth - this.scrollbarHandleWidth + this.contentWidth = + this.containerRef.current.firstChild.clientWidth + this.padding + this.contentViewWidth = + this.containerRef.current.clientWidth + 2 * this.padding + this.contentScrollRange = + this.contentWidth - this.contentViewWidth + 2 * this.padding + this.scrollbarWidth = + this.containerRef.current.parentElement.clientWidth - 2 * this.padding + this.scrollbarScrollRange = + this.scrollbarWidth - this.scrollbarHandleWidth } } @@ -75,16 +84,23 @@ export default class ScrollArea extends React.Component { if (this.containerRef.current) { this.contentOffset = this.containerRef.current.scrollLeft this.scrollProgress = this.contentOffset / this.contentScrollRange - this.scrollbarLeft = parseInt((this.padding) + (this.scrollProgress * this.scrollbarScrollRange), 10) + this.scrollbarLeft = parseInt( + this.padding + this.scrollProgress * this.scrollbarScrollRange, + 10 + ) this.scrollbarHandleRef.current.style.left = this.scrollbarLeft + 'px' - this.scrollbarHandleRef.current.style.width = this.scrollbarHandleWidth + 'px' + this.scrollbarHandleRef.current.style.width = + this.scrollbarHandleWidth + 'px' this.initialised = true } } positionViewport = () => { this.scrollProgress = this.dragOffset / this.scrollbarScrollRange - this.contentLeft = parseInt(this.scrollProgress * this.contentScrollRange, 10) + this.contentLeft = parseInt( + this.scrollProgress * this.contentScrollRange, + 10 + ) this.containerRef.current.scrollLeft = this.contentLeft this.positionScrollbar() } @@ -100,7 +116,7 @@ export default class ScrollArea extends React.Component { document.onmouseup = this.scrollbarRelease document.onmousemove = this.scrollbarDrag if (!this.state.displayScrollbar) { - this.setState({displayScrollbar: true}) + this.setState({ displayScrollbar: true }) } } @@ -121,25 +137,45 @@ export default class ScrollArea extends React.Component { document.onmousemove = null document.ontouchend = null document.ontouchmove = null - this.setState({displayScrollbar: false}) + this.setState({ displayScrollbar: false }) } scrollbarDrag = (e) 
=> { e.preventDefault() - this.dragOffset = e.clientX - (this.mouseDownStart - this.scrollbarStart) - this.padding + this.dragOffset = + e.clientX - (this.mouseDownStart - this.scrollbarStart) - this.padding this.positionViewport() } scrollbarDragTouch = (e) => { - this.dragOffset = e.touches[0].clientX - (this.mouseDownStart - this.scrollbarStart) - this.padding + this.dragOffset = + e.touches[0].clientX - + (this.mouseDownStart - this.scrollbarStart) - + this.padding this.positionViewport() } + // Record that the filters have been peeked so the auto-scroll animation only plays once. + stopScrollAnimation = (e) => { + localStorage.setItem('filtersPeeked', true) + } + render = () => ( <> -
+
{this.props.children} -
+
) diff --git a/ui/src/components/Search.js b/ui/src/components/Search.js index ce460895..e3ff330f 100644 --- a/ui/src/components/Search.js +++ b/ui/src/components/Search.js @@ -1,4 +1,4 @@ -import React from 'react' +import React, { useState } from 'react' import styled from '@emotion/styled' import FiltersContainer from '../containers/FiltersContainer' @@ -25,7 +25,9 @@ const Search = ({ onClearFilters, search, updateSearchText, + searchAreaExpand }) => { + const [filters, setFilters] = useState([]) return ( ) diff --git a/ui/src/components/SearchInput.js b/ui/src/components/SearchInput.js index 656a7b83..adb24ada 100644 --- a/ui/src/components/SearchInput.js +++ b/ui/src/components/SearchInput.js @@ -1,12 +1,13 @@ -import React from 'react' +import React, { useState, useEffect, useCallback } from 'react' import PropTypes from 'prop-types' - import '../static/css/SearchInput.css' import { ReactComponent as CloseIcon } from '../static/images/close.svg' import { ReactComponent as ObjectsIcon } from '../static/images/label.svg' import { ReactComponent as LocationsIcon } from '../static/images/location_on.svg' import { ReactComponent as ColorsIcon } from '../static/images/color_lens.svg' import { ReactComponent as StylesIcon } from '../static/images/style.svg' +import { ReactComponent as PeopleIcon } from '../static/images/person.svg' +import { ReactComponent as EventsIcon } from '../static/images/event.svg' import { ReactComponent as CamerasIcon } from '../static/images/photo_camera.svg' import { ReactComponent as StarIcon } from '../static/images/star_outline.svg' @@ -14,8 +15,10 @@ const GROUP_ICONS = { 'Generic Tags': ObjectsIcon, Objects: ObjectsIcon, Locations: LocationsIcon, + People: PeopleIcon, Colors: ColorsIcon, Styles: StylesIcon, + Events: EventsIcon, Cameras: CamerasIcon, Lenses: CamerasIcon, Aperture: CamerasIcon, @@ -29,18 +32,132 @@ const GROUP_ICONS = { Rating: StarIcon, } +const ENTER_KEY = 13 +const UP_KEY = 38 +const DOWN_KEY = 40 + const SearchInput = ({ selectedFilters, search, onFilterToggle, onClearFilters, onSearchTextChange, + filters, }) => { + const [activeOption, setActiveOption] = useState(0) + const [filteredOptions, setFilteredOptions] = useState([]) + const [showOptions, setShowOptions] = useState(false) + const [options, setOptions] = useState([]) + + const prepareOptions = useCallback(() => { + let searchOptions = [] + filters.map((f) => { + f.items.map((i) => { + i['type'] = f.name + searchOptions.push(i) + return i + }) + return f + }) + setOptions(searchOptions) + }, [filters, setOptions]) + + useEffect(() => { + if (filters.length) prepareOptions() + }, [filters, prepareOptions]) + + const handleOnChange = (e) => { + onSearchTextChange(e.target.value) + const userInput = e.currentTarget.value + const filteredOptions = options.filter( + (optionName) => + optionName.name.toLowerCase().indexOf(userInput.toLowerCase()) > -1 + ) + setActiveOption(0) + setFilteredOptions(filteredOptions) + setShowOptions(true) + } + + const onKeyDown = (e) => { + if (e.keyCode === ENTER_KEY) { + onSearchTextChange('') + setActiveOption(0) + setShowOptions(false) + filteredOptions[activeOption] && + onFilterToggle( + filteredOptions[activeOption].id, + filteredOptions[activeOption].type, + filteredOptions[activeOption].name + ) + } else if (e.keyCode === UP_KEY) { + if (activeOption === 0) return + setActiveOption(activeOption - 1) + } else if (e.keyCode === DOWN_KEY) { + if (activeOption === filteredOptions.length - 1) return + setActiveOption(activeOption + 1) + } + } + + 
const handleOnClick = (index) => { + setActiveOption(0) + setFilteredOptions([]) + setShowOptions(false) + onSearchTextChange('') + onFilterToggle( + filteredOptions[index].id, + filteredOptions[index].type, + filteredOptions[index].name + ) + } + + let optionList + if (showOptions && search) { + if (filteredOptions.length) { + optionList = ( +
    + {filteredOptions.map((opt, index) => { + let className + if (index === activeOption) className = 'option-active' + + let icon = React.createElement(GROUP_ICONS[opt.type], { + className: 'groupIcon', + alt: opt.group, + }) + return ( +
  • handleOnClick(index)} + > +
    + {icon} + {opt.name} +
    + {opt.type} +
  • + ) + })} +
+ ) + } else { + optionList = ( +
+ No options found +
+ ) + } + } + return (
-
    +
      {selectedFilters.map((filter) => { - let icon = React.createElement(GROUP_ICONS[filter.group], { + let icon = ObjectsIcon + if (GROUP_ICONS[filter.group]) { + icon = GROUP_ICONS[filter.group] + } + icon = React.createElement(icon, { className: 'groupIcon', alt: filter.group, }) @@ -58,8 +175,10 @@ const SearchInput = ({ type="text" placeholder="Search" value={search} - onChange={onSearchTextChange} + onChange={handleOnChange} + onKeyDown={onKeyDown} /> + {optionList}
diff --git a/ui/src/components/Settings.js b/ui/src/components/Settings.js index 37f63ca9..6ca18c00 100644 --- a/ui/src/components/Settings.js +++ b/ui/src/components/Settings.js @@ -1,4 +1,4 @@ -import React, { useState, useEffect, useRef } from 'react' +import React, { useState, useEffect } from 'react' import { useQuery, useMutation } from '@apollo/client' import { useSelector } from 'react-redux' import { getActiveLibrary } from '../stores/libraries/selector' @@ -8,9 +8,9 @@ import { Flex, Stack, FormLabel, - Input, - InputGroup, - IconButton, + // Input, + // InputGroup, + // IconButton, } from '@chakra-ui/core' import Modal from './Modal' @@ -19,7 +19,8 @@ import { SETTINGS_COLOR, SETTINGS_LOCATION, SETTINGS_OBJECT, - SETTINGS_SOURCE_FOLDER, + SETTINGS_FACE, + // SETTINGS_SOURCE_FOLDER, GET_SETTINGS, } from '../graphql/settings' // import folder from '../static/images/folder.svg' @@ -42,22 +43,27 @@ export default function Settings() { { key: 'classificationColorEnabled', type: 'boolean', - label: 'Run color analysis on photos?', + label: 'Run color analysis on photos', }, { key: 'classificationLocationEnabled', type: 'boolean', - label: 'Run location detection on photos?', + label: 'Run location detection on photos', + }, + { + key: 'classificationFaceEnabled', + type: 'boolean', + label: 'Run face recognition on photos', }, { key: 'classificationStyleEnabled', type: 'boolean', - label: 'Run style classification on photos?', + label: 'Run style classification on photos', }, { key: 'classificationObjectEnabled', type: 'boolean', - label: 'Run object detection on photos?', + label: 'Run object detection on photos', }, ] @@ -100,34 +106,45 @@ export default function Settings() { }, }).catch((e) => {}) return key + case 'classificationFaceEnabled': + settingUpdateFace({ + variables: { + classificationFaceEnabled: newSettings.classificationFaceEnabled, + libraryId: activeLibrary?.id, + }, + }).catch((e) => {}) + return key default: return null } } - function onSelectSourceDir() { - if (window.sendSyncToElectron) { - let dirs = window.sendSyncToElectron('select-dir') - setSettings({ sourceDirs: dirs }) - } - } + // TODO: Re-implement desktop app settings integration + // function onSelectSourceDir() { + // if (window.sendSyncToElectron) { + // let dirs = window.sendSyncToElectron('select-dir') + // setSettings({ sourceDirs: dirs }) + // } + // } + + // function onChangeSourceDir(e) { + // let newSettings = { ...settings } + // newSettings.sourceDirs = e.currentTarget.value + // setSettings(newSettings) + // settingUpdateSourceFolder({ + // variables: { + // sourceFolder: newSettings.sourceDirs, + // libraryId: activeLibrary?.id, + // }, + // }).catch((e) => {}) + // } - function onChangeSourceDir(e) { - let newSettings = { ...settings } - newSettings.sourceDirs = e.currentTarget.value - setSettings(newSettings) - settingUpdateSourceFolder({ - variables: { - sourceFolder: newSettings.sourceDirs, - libraryId: activeLibrary?.id, - }, - }).catch((e) => {}) - } const [settingUpdateStyle] = useMutation(SETTINGS_STYLE) const [settingUpdateColor] = useMutation(SETTINGS_COLOR) const [settingUpdateLocation] = useMutation(SETTINGS_LOCATION) const [settingUpdateObject] = useMutation(SETTINGS_OBJECT) - const [settingUpdateSourceFolder] = useMutation(SETTINGS_SOURCE_FOLDER) + const [settingUpdateFace] = useMutation(SETTINGS_FACE) + // const [settingUpdateSourceFolder] = useMutation(SETTINGS_SOURCE_FOLDER) return ( @@ -139,20 +156,20 @@ export default function Settings() { if (settings) { if (item.type 
=== 'path') { - field = ( - - - - - ) + // field = ( + // + // + // + // + // ) } else if (item.type === 'boolean') { field = ( { +export const useSettings = (activeLibrary) => { const [existingSettings, setSettings] = useState({}) const { loading, data, refetch } = useQuery(GET_SETTINGS, { variables: { libraryId: activeLibrary?.id }, }) - const isInitialMount = useRef(true) + // console.log(error) + // const isInitialMount = useRef(true) + + // useEffect(() => { + // refetch() + // }, [activeLibrary, refetch]) useEffect(() => { - refetch() - }, [activeLibrary, refetch]) + if (activeLibrary && !loading) { + refetch() + } + }, [activeLibrary, loading, refetch]) useEffect(() => { - if (isInitialMount.current) { - isInitialMount.current = false - } else { - if (!loading && data) { - let setting = { ...data.librarySetting.library } - setting.sourceDirs = data.librarySetting.sourceFolder - setSettings(setting) - } + // if (isInitialMount.current) { + // isInitialMount.current = false + // } else { + if (!loading && data) { + let setting = { ...data.librarySetting.library } + setting.sourceDirs = data.librarySetting.sourceFolder + setSettings(setting) } - // TODO: Re-sync with desktop app - // if (window.sendSyncToElectron) { - // let result = window.sendSyncToElectron('get-settings') - // setSettings(result) - // } }, [data, loading]) + // useEffect(() => { + // if (activeLibrary) { + // refetch() + // } + // if (!loading) { + // let setting = {...data.librarySetting.library} + // setting.sourceDirs = data.librarySetting.sourceFolder + // setSettings(setting) + // } + // if (window.sendSyncToElectron) { + // let result = window.sendSyncToElectron('get-settings') + // setSettings(result) + // } + // }, [activeLibrary, loading, refetch, data]) + function setAndSaveSettings(newSettings) { - if (window.sendSyncToElectron) { - window.sendSyncToElectron('set-settings', newSettings) - } + // if (window.sendSyncToElectron) { + // window.sendSyncToElectron('set-settings', newSettings) + // } setSettings(newSettings) } - return [existingSettings, setAndSaveSettings] } diff --git a/ui/src/components/User.js b/ui/src/components/User.js index 84cb25da..e969d88a 100644 --- a/ui/src/components/User.js +++ b/ui/src/components/User.js @@ -1,4 +1,4 @@ -import React, { useRef, useState, useEffect } from 'react' +import React, { useEffect } from 'react' import { Link } from 'react-router-dom' import { useDispatch, useSelector } from 'react-redux' import PropTypes from 'prop-types' @@ -10,6 +10,7 @@ import arrowDown from '../static/images/arrow_down.svg' import library from '../static/images/library.svg' import settings from '../static/images/settings.svg' import logout from '../static/images/logout.svg' +import { useComponentVisible } from './Header' const Container = styled('div')` width: 84px; @@ -108,33 +109,7 @@ const Container = styled('div')` } ` -function useComponentVisible(initialIsVisible) { - const [isComponentVisible, setIsComponentVisible] = useState(initialIsVisible) - const ref = useRef(null) - - const handleHideDropdown = (event: KeyboardEvent) => { - if (event.key === 'Escape') { - setIsComponentVisible(false) - } - } - - const handleClickOutside = (event) => { - if (ref.current && !ref.current.contains(event.target)) { - setIsComponentVisible(false) - } - } - useEffect(() => { - document.addEventListener('keydown', handleHideDropdown, false) - document.addEventListener('click', handleClickOutside, false) - return () => { - document.removeEventListener('keydown', handleHideDropdown, true) - 
document.removeEventListener('click', handleClickOutside, true) - } - }) - - return { ref, isComponentVisible, setIsComponentVisible } -} -const User = ({ profile, libraries }) => { +const User = ({ profile, libraries, showUserMenu ,setShowUserMenu, setShowNotification }) => { const dispatch = useDispatch() const activeLibrary = useSelector(getActiveLibrary) const { @@ -155,14 +130,20 @@ const User = ({ profile, libraries }) => { } const handleShowMenu = () => { setIsComponentVisible(true) + setShowUserMenu(true) + setShowNotification(false) } + useEffect(() => { + if (!isComponentVisible) + setShowUserMenu(false) + }, [isComponentVisible, setShowUserMenu]) return ( User account
    {profile ? ( diff --git a/ui/src/components/ZoomableImage.js b/ui/src/components/ZoomableImage.js index bb3cb914..02401f64 100644 --- a/ui/src/components/ZoomableImage.js +++ b/ui/src/components/ZoomableImage.js @@ -78,12 +78,26 @@ const Container = styled('div')` } ` -const ZoomableImage = ({ photoId, boxes, next, prev }) => { +const ZoomableImage = ({ + photoId, + boxes, + next, + prev, + refetch, + showBoundingBox, + setShowBoundingBox, + setShowMetadata, + showMetadata, + showTopIcons, + setShowTopIcons, +}) => { const [scale, setScale] = useState(1) const [zoom, setZoom] = useState(false) const [loading, setLoading] = useState(true) const [displayImage, setDisplayImage] = useState(false) - + const [editLableId, setEditLableId] = useState('') + let clickTimeOut = null + const prevNextPhotos = useSelector((state) => getPrevNextPhotos(state, photoId) ) @@ -99,7 +113,7 @@ const ZoomableImage = ({ photoId, boxes, next, prev }) => { const swipeHandlers = useSwipeable({ onSwipedLeft: () => nextPhoto(), - onSwipedRight: () => prevPhoto() + onSwipedRight: () => prevPhoto(), }) const loadNextPrevImages = () => { @@ -134,8 +148,32 @@ const ZoomableImage = ({ photoId, boxes, next, prev }) => { const handleZoom = (e) => { if (e.scale === 1 && zoom) { setZoom(false) - } else if(e.scale > 1 && !zoom) { - setZoom(true) + } else if (e.scale > 1 && !zoom) { + setTimeout(() => { + setZoom(true) + }, 200) + } + } + + // To handle icon show hide on single click. + const showHideIcons = (event) => { + if (!editLableId) { + if (clickTimeOut !== null) { + clearTimeout(clickTimeOut) + } else { + clickTimeOut = setTimeout(() => { + // setShowFaceIcons(!showFaceIcons) + if (showMetadata) { + setShowMetadata(!showMetadata) + } else { + setShowTopIcons(!showTopIcons) + } + clearTimeout(clickTimeOut) + clickTimeOut = null + }, 300) + } + } else { + setEditLableId('') } } @@ -159,17 +197,30 @@ const ZoomableImage = ({ photoId, boxes, next, prev }) => {
    - -
    +
    - - {boxes && } - + {boxes && + showTopIcons && + Object.keys(boxes).map((key, index) => ( + + + + ))}
    @@ -189,15 +240,32 @@ const ZoomableImage = ({ photoId, boxes, next, prev }) => { ZoomableImage.propTypes = { photoId: PropTypes.string, - boxes: PropTypes.arrayOf( - PropTypes.shape({ - name: PropTypes.string, - positionX: PropTypes.number, - positionY: PropTypes.number, - sizeX: PropTypes.number, - sizeY: PropTypes.number, - }) - ), + boxes: PropTypes.shape({ + object: PropTypes.arrayOf( + PropTypes.shape({ + name: PropTypes.string, + positionX: PropTypes.number, + positionY: PropTypes.number, + sizeX: PropTypes.number, + sizeY: PropTypes.number, + }) + ), + face: PropTypes.arrayOf( + PropTypes.shape({ + id: PropTypes.string, + name: PropTypes.string, + positionX: PropTypes.number, + positionY: PropTypes.number, + sizeX: PropTypes.number, + sizeY: PropTypes.number, + verified: PropTypes.bool, + deleted: PropTypes.bool, + boxColorClass: PropTypes.string, + showVerifyIcon: PropTypes.bool, + }) + ), + }), + refetch: PropTypes.func, } export default ZoomableImage diff --git a/ui/src/components/onboarding/Result.js b/ui/src/components/onboarding/Result.js index d9d66664..ba0c4118 100644 --- a/ui/src/components/onboarding/Result.js +++ b/ui/src/components/onboarding/Result.js @@ -2,6 +2,7 @@ import React from 'react' const Result = () => { localStorage.setItem('isSignin', true) + sessionStorage.removeItem('__STATE_MACHINE__'); setTimeout(() => { window.location.reload() }, 2000) diff --git a/ui/src/components/onboarding/Step3CreateLibrary.js b/ui/src/components/onboarding/Step3CreateLibrary.js index 58e398ef..ab445ced 100644 --- a/ui/src/components/onboarding/Step3CreateLibrary.js +++ b/ui/src/components/onboarding/Step3CreateLibrary.js @@ -87,10 +87,12 @@ const Step3CreateLibrary = ({ history }) => { {state.data.storageBackend === 'Lo' && (

    - The base path will need to be writeable so that we can put new - files here, and also needs to be large enough to store your whole - collection. If you’re running in a container, feel free to restart - it with new mounted volumes if you need to. + Leave base path as the default unless you have configured multiple + volumes for multiple libraries. The base path will need to be + writeable so that we can put new files here, and also needs to be + large enough to store your whole collection. If you’re running in + a container, feel free to restart it with new mounted volumes if + you need to.

    { : true } /> + { - this.setState({ search: event.target.value }) + updateSearchText = (value) => { + this.setState({ search: value }) } render = () => { diff --git a/ui/src/containers/FiltersContainer.js b/ui/src/containers/FiltersContainer.js index bc32b2e3..05d1e9e5 100644 --- a/ui/src/containers/FiltersContainer.js +++ b/ui/src/containers/FiltersContainer.js @@ -1,10 +1,11 @@ -import React, { useEffect } from 'react' +import React, { useEffect, useState } from 'react' import { useQuery } from '@apollo/client' import { useSelector } from 'react-redux' import gql from 'graphql-tag' import Filters from '../components/Filters' import Spinner from '../components/Spinner' import { getActiveLibrary } from '../stores/libraries/selector' +import { isTagUpdated } from '../stores/tag/selector' const GET_FILTERS = gql` query AllFilters($libraryId: UUID, $multiFilter: String) { @@ -31,6 +32,10 @@ const GET_FILTERS = gql` id name } + allEventTags(libraryId: $libraryId, multiFilter: $multiFilter) { + id + name + } allCameras(libraryId: $libraryId) { id make @@ -72,9 +77,26 @@ function createFilterSelection(sectionName, data, prefix = 'tag') { } } -const FiltersContainer = ({ selectedFilters, onFilterToggle }) => { +const REMOVABLE_TAGS = [ + 'Aperture', + 'Exposure', + 'ISO Speed', + 'Focal Length', + 'Rating', + 'Flash', +] + +const FiltersContainer = ({ + selectedFilters, + onFilterToggle, + searchAreaExpand, + setFilters, +}) => { const user = useSelector((state) => state.user) // Using user here from Redux store so we can wait for any JWT tokens to be refreshed before running GraphQL queries that require authentication + const [isFiltersAvail, setIsFiltersAvail] = useState(false) const activeLibrary = useSelector(getActiveLibrary) + let filterData = [] + const tagUpdated = useSelector(isTagUpdated) let filtersStr = '' if (activeLibrary) { filtersStr = `${selectedFilters.map((filter) => filter.id).join(' ')}` @@ -89,16 +111,27 @@ const FiltersContainer = ({ selectedFilters, onFilterToggle }) => { }, { skip: !user } ) + useEffect(() => { refetch() + }, [activeLibrary, refetch, tagUpdated]) - }, [activeLibrary, refetch]) + useEffect(() => { + if (isFiltersAvail && filterData.length) { + const autoSuggestionFilters = filterData.filter((f) => { + return REMOVABLE_TAGS.indexOf(f.name) === -1 + }) + setFilters(autoSuggestionFilters) + } // eslint-disable-next-line + }, [isFiltersAvail, setFilters]) const getFilterdData = (type, array) => { const filterArr = selectedFilters.filter((s) => s.group === type) let data = [] if (type === 'Locations' && filterArr.length > 0) { - const id = array.filter((c) => filterArr.find((rm) => rm.name === c.name))[0].id + const id = array.filter((c) => + filterArr.find((rm) => rm.name === c.name) + )[0].id data = array.filter((c) => !filterArr.find((rm) => rm.name === c.name)) data = data.filter((d) => d?.parent?.id !== id) } else { @@ -109,7 +142,6 @@ const FiltersContainer = ({ selectedFilters, onFilterToggle }) => { if (loading) return if (error) return `Error! 
${error.message}` - let filterData = [] if (data) { if (data.allGenericTags.length) { filterData.push( @@ -124,10 +156,6 @@ const FiltersContainer = ({ selectedFilters, onFilterToggle }) => { const locationsTags = getFilterdData('Locations', data.allLocationTags) filterData.push(createFilterSelection('Locations', locationsTags)) } - if (data.allPersonTags.length) { - const peopleTags = getFilterdData('People', data.allPersonTags) - filterData.push(createFilterSelection('People', peopleTags)) - } if (data.allColorTags.length) { const colorsTags = getFilterdData('Colors', data.allColorTags) filterData.push(createFilterSelection('Colors', colorsTags)) @@ -136,6 +164,14 @@ const FiltersContainer = ({ selectedFilters, onFilterToggle }) => { const stylesTags = getFilterdData('Styles', data.allStyleTags) filterData.push(createFilterSelection('Styles', stylesTags)) } + if (data.allEventTags.length) { + const eventsTags = getFilterdData('Events', data.allEventTags) + filterData.push(createFilterSelection('Events', eventsTags)) + } + if (data.allPersonTags.length) { + const peopleTags = getFilterdData('People', data.allPersonTags) + filterData.push(createFilterSelection('People', peopleTags)) + } if (data.allCameras.length) { filterData.push({ name: 'Cameras', @@ -212,10 +248,13 @@ const FiltersContainer = ({ selectedFilters, onFilterToggle }) => { ) ) } + if (!isFiltersAvail) setIsFiltersAvail(true) } + return ( diff --git a/ui/src/containers/PhotoDetailContainer.js b/ui/src/containers/PhotoDetailContainer.js index ea920071..076d93be 100644 --- a/ui/src/containers/PhotoDetailContainer.js +++ b/ui/src/containers/PhotoDetailContainer.js @@ -54,6 +54,19 @@ const GET_PHOTO = gql` sizeX sizeY } + personTags { + id + tag { + name + } + positionX + positionY + sizeX + sizeY + verified + deleted + showVerifyIcon + } colorTags { id tag { @@ -61,6 +74,13 @@ const GET_PHOTO = gql` } significance } + eventTags { + id + tag { + name + } + significance + } styleTags { id tag { @@ -80,6 +100,7 @@ const GET_PHOTO = gql` } baseFileId baseFilePath + downloadUrl width height } @@ -104,7 +125,7 @@ const PhotoDetailContainer = (props) => { const handleKeyDown = (event) => { switch (event.keyCode) { case ESCAPE_KEY: - history.push('/') + event.target.name !== 'tagName' && history.push('/') break default: break diff --git a/ui/src/containers/SearchContainer.js b/ui/src/containers/SearchContainer.js index bd4a6327..a66ab6c8 100644 --- a/ui/src/containers/SearchContainer.js +++ b/ui/src/containers/SearchContainer.js @@ -9,6 +9,8 @@ export default class SearchContainer extends React.Component { search={this.props.search} onFilterToggle={this.props.onFilterToggle} onClearFilters={this.props.onClearFilters} - updateSearchText={this.props.updateSearchText} /> + updateSearchText={this.props.updateSearchText} + searchAreaExpand={this.props.searchAreaExpand} + /> } } diff --git a/ui/src/containers/SearchInputContainer.js b/ui/src/containers/SearchInputContainer.js index 15994353..27363f8c 100644 --- a/ui/src/containers/SearchInputContainer.js +++ b/ui/src/containers/SearchInputContainer.js @@ -10,6 +10,7 @@ export default class SearchInputContainer extends React.Component { onClearFilters={this.props.onClearFilters} search={this.props.search} onSearchTextChange={this.props.updateSearchText} + filters={this.props.filters} /> ) } diff --git a/ui/src/graphql/onboarding.js b/ui/src/graphql/onboarding.js index 1e482292..8b2e1f95 100644 --- a/ui/src/graphql/onboarding.js +++ b/ui/src/graphql/onboarding.js @@ -127,6 +127,7 @@ mutation ( 
diff --git a/ui/src/graphql/onboarding.js b/ui/src/graphql/onboarding.js
index 1e482292..8b2e1f95 100644
--- a/ui/src/graphql/onboarding.js
+++ b/ui/src/graphql/onboarding.js
@@ -127,6 +127,7 @@ mutation (
   $classificationStyleEnabled: Boolean!,
   $classificationObjectEnabled: Boolean!,
   $classificationLocationEnabled: Boolean!,
+  $classificationFaceEnabled: Boolean!,
   $userId: ID!,
   $libraryId: ID!,
 ) {
@@ -135,6 +136,7 @@ mutation (
     classificationStyleEnabled:$classificationStyleEnabled,
     classificationObjectEnabled:$classificationObjectEnabled,
     classificationLocationEnabled:$classificationLocationEnabled,
+    classificationFaceEnabled:$classificationFaceEnabled,
     userId:$userId,
     libraryId:$libraryId,
   }) {
diff --git a/ui/src/graphql/settings.js b/ui/src/graphql/settings.js
index cdd13f98..516e5b4e 100644
--- a/ui/src/graphql/settings.js
+++ b/ui/src/graphql/settings.js
@@ -60,6 +60,21 @@ export const SETTINGS_OBJECT = gql`
     }
   }
 `
+export const SETTINGS_FACE = gql`
+  mutation updateFaceEnabled(
+    $classificationFaceEnabled: Boolean!
+    $libraryId: ID
+  ) {
+    updateFaceEnabled(
+      input: {
+        classificationFaceEnabled: $classificationFaceEnabled
+        libraryId: $libraryId
+      }
+    ) {
+      classificationFaceEnabled
+    }
+  }
+`
 export const SETTINGS_SOURCE_FOLDER = gql`
   mutation updateSourceFolder($sourceFolder: String!, $libraryId: ID) {
     updateSourceFolder(
@@ -79,8 +94,22 @@ export const GET_SETTINGS = gql`
         classificationStyleEnabled
         classificationObjectEnabled
         classificationLocationEnabled
+        classificationFaceEnabled
       }
       sourceFolder
     }
   }
 `
+export const GET_TASK_PROGRESS = gql`
+  query TaskProgress {
+    taskProgress {
+      generateThumbnails
+      processRaw
+      classifyColor
+      classifyObject
+      classifyLocation
+      classifyStyle
+      classifyFace
+    }
+  }
+`
\ No newline at end of file
diff --git a/ui/src/graphql/tag.js b/ui/src/graphql/tag.js
index 817c7fef..452923e6 100644
--- a/ui/src/graphql/tag.js
+++ b/ui/src/graphql/tag.js
@@ -23,3 +23,31 @@ export const REMOVE_TAG = gql`
     }
   }
 `
+export const EDIT_FACE_TAG = gql`
+  mutation editFaceTag(
+    $photoTagId: ID!,
+    $newName: String!,
+  ) {
+    editFaceTag(photoTagId:$photoTagId, newName:$newName) {
+      ok
+    }
+  }
+`
+export const BLOCK_FACE_TAG = gql`
+  mutation blockFaceTag(
+    $photoTagId: ID!,
+  ) {
+    blockFaceTag(photoTagId:$photoTagId) {
+      ok
+    }
+  }
+`
+export const VERIFY_FACE_TAG = gql`
+  mutation verifyPhoto(
+    $photoTagId: ID!,
+  ) {
+    verifyPhoto(photoTagId:$photoTagId) {
+      ok
+    }
+  }
+`
\ No newline at end of file
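The face-tag mutations defined above are plain GraphQL documents, so wiring one up is a one-liner with Apollo. The component below is a minimal sketch, assuming @apollo/client's useMutation hook; the component, prop names and save handler are illustrative and not part of this changeset. The input is named tagName so the Escape-key guard added in PhotoDetailContainer leaves it alone while editing.

// Illustrative component (not in the changeset) that renames a face tag
// using the EDIT_FACE_TAG mutation defined above.
import React, { useState } from 'react'
import { useMutation } from '@apollo/client'
import { EDIT_FACE_TAG } from '../graphql/tag'

const FaceTagName = ({ photoTagId, initialName, onSaved }) => {
  const [name, setName] = useState(initialName)
  const [editFaceTag] = useMutation(EDIT_FACE_TAG)

  const save = async () => {
    // Variable names match the mutation document: photoTagId, newName.
    const { data } = await editFaceTag({ variables: { photoTagId, newName: name } })
    if (data && data.editFaceTag.ok && onSaved) onSaved(name)
  }

  return (
    <input
      name="tagName" // matches the Escape-key guard in PhotoDetailContainer
      value={name}
      onChange={(e) => setName(e.target.value)}
      onBlur={save}
    />
  )
}

export default FaceTagName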
diff --git a/ui/src/static/css/Filters.css b/ui/src/static/css/Filters.css
index f1ead7fe..c9f08250 100644
--- a/ui/src/static/css/Filters.css
+++ b/ui/src/static/css/Filters.css
@@ -6,13 +6,28 @@
 }
 .FiltersContent {
   width: max-content;
-  margin: 0 0 0 40px;
+  padding-left: 40px;
 }
-.FiltersContent {
-  width: max-content;
-  margin: 0 0 0 40px;
+.PeekAnimation {
   transition: opacity 1000ms;
+  animation: autoPeek ease-in-out 2s alternate;
+  animation-iteration-count: 2;
+}
+/* For auto scroll. */
+@keyframes autoPeek {
+  0% {
+    margin-left: 0;
+    transform: translate3d(0, 0, 0);
+  }
+  75% {
+    margin-left: 0;
+    transform: translate3d(0, 0, 0);
+  }
+  100% {
+    margin-left: 0;
+    transform: translate3d(-200px, 0, 0);
+  }
 }

 .FiltersContent .filterGradient {
@@ -134,7 +149,7 @@
   margin: 0 -20px -20px;
 }
 .FiltersContent {
-  margin-left: 30px;
+  padding-left: 30px;
 }
 .FilterGroup {
   width: 180px;
diff --git a/ui/src/static/css/Map.css b/ui/src/static/css/Map.css
index 46e843fc..c1b5907b 100644
--- a/ui/src/static/css/Map.css
+++ b/ui/src/static/css/Map.css
@@ -49,3 +49,9 @@
 .leaflet-popup-close-button {
   display: none;
 }
+.leaflet-custom-icon {
+  border: 1px solid rgba(255, 255, 255, 0.9);
+  border-radius: 50%;
+  box-shadow: 0 5px 12px rgba(0, 0, 0, 0.5);
+  background: rgba(255, 255, 255, 0.25);
+}
diff --git a/ui/src/static/css/SearchInput.css b/ui/src/static/css/SearchInput.css
index 6f04367c..7a656598 100644
--- a/ui/src/static/css/SearchInput.css
+++ b/ui/src/static/css/SearchInput.css
@@ -36,8 +36,8 @@
 .SearchInput li.filter svg.removeIcon:hover {
   opacity: 0.6;
 }
-.SearchInput input[type='text'] {
-  width: 150px;
+.SearchInput input {
+  width: 100%;
   height: 30px;
   flex: 1;
   min-width: 150px;
@@ -49,7 +49,7 @@
   color: #fff;
   line-height: 1;
 }
-.SearchInput input[type='text']::placeholder {
+.SearchInput input::placeholder {
   /* Chrome, Firefox, Opera, Safari 10.1+ */
   color: rgba(255, 255, 255, 0.6);
   opacity: 1; /* Firefox */
@@ -64,3 +64,49 @@
 .SearchInput svg.clearAll:hover {
   opacity: 0.6;
 }
+
+ul.options {
+  display: block;
+  list-style: none;
+  transition: width 0.3s;
+  margin: auto;
+  position: absolute;
+  top: 100%;
+  left: 0;
+  width: 100%;
+  padding: 0;
+  background: #484848;
+  opacity: 0.9;
+  /*max-height: 200px;*/
+  overflow-y: auto;
+  z-index: 2;
+}
+
+ul.options li {
+  display: flex;
+  margin: 0;
+  padding: 10px;
+  font-size: 14px;
+  width: 100%;
+  transition: 0.3s all;
+  cursor: pointer;
+  color: white;
+  justify-content: space-between;
+  align-items: center;
+}
+ul.options li svg {
+  display: inline-block;
+  vertical-align: middle;
+  margin-right: 5px;
+  filter: invert(0.9);
+}
+ul.options li:hover {
+  background-color: #545454;
+}
+
+ul.options li.option-active {
+  background-color: #545454;
+}
+.no-options {
+  color: white;
+}
diff --git a/ui/src/static/images/block_black.svg b/ui/src/static/images/block_black.svg
new file mode 100644
index 00000000..ad9c488c
--- /dev/null
+++ b/ui/src/static/images/block_black.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/ui/src/static/images/done_black.svg b/ui/src/static/images/done_black.svg
new file mode 100644
index 00000000..4a715722
--- /dev/null
+++ b/ui/src/static/images/done_black.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/ui/src/static/images/download_arrow.svg b/ui/src/static/images/download_arrow.svg
new file mode 100644
index 00000000..e40eaee8
--- /dev/null
+++ b/ui/src/static/images/download_arrow.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/ui/src/static/images/event.svg b/ui/src/static/images/event.svg
new file mode 100644
index 00000000..c4d40720
--- /dev/null
+++ b/ui/src/static/images/event.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/ui/src/static/images/pause.svg b/ui/src/static/images/pause.svg
new file mode 100644
index 00000000..914f25a7
--- /dev/null
+++ b/ui/src/static/images/pause.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
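The PeekAnimation rule and autoPeek keyframes above nudge the filter row sideways and back to hint that it scrolls horizontally. A rough sketch of how the class might be toggled is below; the component, state and handler names are assumptions for illustration, not code from this changeset.

// Illustrative only: apply PeekAnimation once, then drop it so re-renders
// do not replay the peek.
import React, { useState } from 'react'

const FiltersRow = ({ children }) => {
  const [peeked, setPeeked] = useState(false)

  // autoPeek runs two alternating iterations; when the animation finishes,
  // remove the class so the row stays where the user left it.
  const handleAnimationEnd = () => setPeeked(true)

  return (
    <div
      className={peeked ? 'FiltersContent' : 'FiltersContent PeekAnimation'}
      onAnimationEnd={handleAnimationEnd}
    >
      {children}
    </div>
  )
}

export default FiltersRow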
diff --git a/ui/src/static/images/person.svg b/ui/src/static/images/person.svg
new file mode 100644
index 00000000..ce0341d5
--- /dev/null
+++ b/ui/src/static/images/person.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/ui/src/static/images/play.svg b/ui/src/static/images/play.svg
new file mode 100644
index 00000000..1bb457c6
--- /dev/null
+++ b/ui/src/static/images/play.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/ui/src/stores/index.js b/ui/src/stores/index.js
index 621e161f..ac8e5741 100644
--- a/ui/src/stores/index.js
+++ b/ui/src/stores/index.js
@@ -3,12 +3,14 @@ import layout from './layout'
 import libraries from './libraries'
 import photos from './photos'
 import user from './user'
+import isTagUpdated from './tag'

 const reducers = combineReducers({
   layout,
   libraries,
   photos,
   user,
+  isTagUpdated,
 })

 export default reducers
diff --git a/ui/src/stores/tag/index.js b/ui/src/stores/tag/index.js
new file mode 100644
index 00000000..c4a477e8
--- /dev/null
+++ b/ui/src/stores/tag/index.js
@@ -0,0 +1,15 @@
+const IS_TAG_UPDATE = 'IS_TAG_UPDATE'
+
+const initialState = { updated: false }
+
+const isTagUpdated = (state = initialState, action = {}) => {
+  switch (action.type) {
+    case IS_TAG_UPDATE:
+      // Return a new object so subscribers are notified of the change
+      return { ...state, updated: action.payload.updated }
+    default:
+      return state
+  }
+}
+
+export default isTagUpdated
diff --git a/ui/src/stores/tag/selector.js b/ui/src/stores/tag/selector.js
new file mode 100644
index 00000000..45a37586
--- /dev/null
+++ b/ui/src/stores/tag/selector.js
@@ -0,0 +1,3 @@
+export const isTagUpdated = (state) => {
+  return state.isTagUpdated.updated
+}
\ No newline at end of file
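For context, the new isTagUpdated slice holds a single boolean flag. A minimal usage sketch follows, assuming react-redux hooks; the setTagUpdated action creator and the consuming component are hypothetical, since only the reducer, its IS_TAG_UPDATE action type and the selector are added above.

// Minimal sketch of reading and writing the tag-updated flag.
import React from 'react'
import { useDispatch, useSelector } from 'react-redux'
import { isTagUpdated } from '../stores/tag/selector'

// Hypothetical action creator matching the reducer's IS_TAG_UPDATE case.
const setTagUpdated = (updated) => ({
  type: 'IS_TAG_UPDATE',
  payload: { updated },
})

const TagRefetcher = ({ refetch }) => {
  const dispatch = useDispatch()
  const updated = useSelector(isTagUpdated)

  React.useEffect(() => {
    if (updated) {
      refetch() // e.g. re-run GET_PHOTO after a face tag was edited or verified
      dispatch(setTagUpdated(false))
    }
  }, [updated, refetch, dispatch])

  return null
}

export default TagRefetcher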