diff --git a/jsk_perception/node_scripts/aws_auto_checkin_app.py b/jsk_perception/node_scripts/aws_auto_checkin_app.py
index a4bbc2c332..5b4d4e96c4 100755
--- a/jsk_perception/node_scripts/aws_auto_checkin_app.py
+++ b/jsk_perception/node_scripts/aws_auto_checkin_app.py
@@ -173,8 +173,10 @@ def findface(self, face_image):
                 CollectionId=self.COLLECTION_ID,
                 Image={'Bytes': encoded_face_image.tobytes()},
                 FaceMatchThreshold=self.FACE_SIMILARITY_THRESHOLD,
                 MaxFaces=self.MAX_FACES)
             return res
+        except self.rekognition.exceptions.InvalidParameterException as e:
+            rospy.logdebug("No faces detected")
         except Exception as e:
-            print(e)
+            rospy.logerr(e)
         return None
@@ -211,18 +213,23 @@ def callback(self, image, roi):
             ret = self.findface(img[image_roi_slice])
             if ret != None:
                 if ret['FaceMatches'] != []:
-                    item = self.dynamodb_table.get_item(
-                        Key={'RekognitionId':
-                             ret['FaceMatches'][0]['Face']['FaceId']})
-                    if not 'Item' in item:
-                        rospy.loginfo("Item does not have FaceId {}".format(item))
-                        continue
-                    face_id = item['Item']['Name']
-                    rospy.loginfo("FaceId: {}\n Similarity: {}".format(face_id, \
-                        ret['FaceMatches'][0]['Similarity']))
-                    faces.faces.append(Face(face=Rect(cx - w // 2, cy - h // 2, w, h),
-                                            label=face_id,
-                                            confidence=ret['FaceMatches'][0]['Similarity'] / 100.0))
+                    try:
+                        item = self.dynamodb_table.get_item(
+                            Key={'RekognitionId':
+                                 ret['FaceMatches'][0]['Face']['FaceId']})
+                        if not 'Item' in item:
+                            rospy.loginfo("Item does not have FaceId {}".format(item))
+                            continue
+                        face_id = item['Item']['Name']
+                        rospy.logdebug("FaceId: {}\n Similarity: {}".format(face_id, \
+                            ret['FaceMatches'][0]['Similarity']))
+                        faces.faces.append(Face(face=Rect(cx - w // 2, cy - h // 2, w, h),
+                                                label=face_id,
+                                                confidence=ret['FaceMatches'][0]['Similarity'] / 100.0))
+                    except KeyError as e:
+                        rospy.logwarn(
+                            "{}: Dynamodb does not have FaceID: {}".format(
+                                e, ret['FaceMatches'][0]['Face']['FaceID']))
             if self.use_window:
                 # copy colored face rectangle to img_gray
                 img_gray[image_roi_slice] = img[image_roi_slice]
diff --git a/jsk_perception/node_scripts/aws_detect_faces.py b/jsk_perception/node_scripts/aws_detect_faces.py
index 64be38d923..5adc58b793 100755
--- a/jsk_perception/node_scripts/aws_detect_faces.py
+++ b/jsk_perception/node_scripts/aws_detect_faces.py
@@ -162,7 +162,7 @@ def reconfigure_callback(self, config, level):
         return config

     def process_attributes(self, text, img, bbox):
-        rospy.loginfo(" {}".format(text))
+        rospy.logdebug(" {}".format(text))
         if self.use_window:
             cv2.putText(img, text, (bbox.x + bbox.height // 2 + 8, bbox.y - bbox.width // 2 + self.offset),
                         cv2.FONT_HERSHEY_PLAIN,
@@ -213,7 +213,7 @@ def image_callback(self, image):
         landmarks_msgs.poses = []

         # See https://docs.aws.amazon.com/rekognition/latest/dg/API_DetectFaces.html for detail
-        rospy.loginfo("Found {} faces".format(len(faces['FaceDetails'])))
+        rospy.logdebug("Found {} faces".format(len(faces['FaceDetails'])))
         for face in faces['FaceDetails']:

             # Bounding box of the face
@@ -387,7 +387,7 @@ def image_callback(self, image):
         self.landmarks_pub.publish(landmarks_msgs)

         # debug info
-        rospy.loginfo("processing time {} on message taken at {} sec ago".format(
+        rospy.logdebug("processing time {} on message taken at {} sec ago".format(
             (rospy.Time.now() - start_time).to_sec(),
             (rospy.Time.now() - image.header.stamp).to_sec()))
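For context, the error-handling pattern this patch adds to `findface`/`callback` can be sketched outside ROS as below. This is a minimal sketch, not part of the patch: the collection id, DynamoDB table name, image path, and threshold are hypothetical placeholders; only the boto3 calls (`search_faces_by_image`, `Table.get_item`) and the `InvalidParameterException` that Rekognition raises when it finds no face in the submitted image come from the patched code and the AWS documentation.

```python
# Minimal standalone sketch of the error handling introduced above, outside ROS.
# Collection id, table name, image path and threshold are hypothetical.
import boto3

rekognition = boto3.client('rekognition')
table = boto3.resource('dynamodb').Table('faces')  # hypothetical table name

with open('face.jpg', 'rb') as f:  # hypothetical test image
    image_bytes = f.read()

res = None
try:
    res = rekognition.search_faces_by_image(
        CollectionId='my-face-collection',  # hypothetical collection id
        Image={'Bytes': image_bytes},
        FaceMatchThreshold=80.0,
        MaxFaces=1)
except rekognition.exceptions.InvalidParameterException:
    # Rekognition raises InvalidParameterException when it cannot find a face
    # in the input image, so treat it as "nothing detected" rather than an error.
    print('No faces detected')
except Exception as e:
    print(e)

if res is not None and res['FaceMatches']:
    match = res['FaceMatches'][0]
    try:
        item = table.get_item(Key={'RekognitionId': match['Face']['FaceId']})
        if 'Item' in item:
            print('{} (similarity {:.1f})'.format(item['Item']['Name'],
                                                  match['Similarity']))
        else:
            print('DynamoDB has no entry for {}'.format(match['Face']['FaceId']))
    except KeyError as e:
        # Mirrors the patch: a missing key in the lookup result is logged, not fatal.
        print('DynamoDB item is missing a key: {}'.format(e))
```

The log-level changes follow the same idea: per-frame chatter (`loginfo`) moves to `logdebug`, while genuinely unexpected failures are surfaced with `logerr`/`logwarn`.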