Photo upload refactor #2733 (base: master)
moderation_queue/models.py

```diff
@@ -1,13 +1,18 @@
import ast
import json
import uuid
from datetime import date
from os.path import join, splitext
from tempfile import NamedTemporaryFile

import sorl.thumbnail
from django.contrib.auth.models import User
from django.db import models
from django.urls import reverse
from PIL import Image as PillowImage
from PIL import ImageOps

from .helpers import convert_image_to_png

PHOTO_REVIEWERS_GROUP_NAME = "Photo Reviewers"
VERY_TRUSTED_USER_GROUP_NAME = "Very Trusted User"
```

```diff
@@ -126,6 +131,84 @@ def uploaded_by(self):
            return self.user.username
        return "a robot 🤖"

    def start_image_processing(self):
        from django_q.tasks import async_chain

        async_chain(
            [
                (
                    "moderation_queue.tasks.normalise_queued_image",
                    (self.id,),
                ),
                (
                    "moderation_queue.tasks.detect_faces_for_queued_image",
                    (self.id,),
                ),
            ]
        )
```

> **Member** (on the `async_chain` call): We need to explicitly set a timeout on these two tasks. Our default timeout for the cluster is 240 seconds. We need this to be quite long for our scheduled tasks, which process many objects in a single run, but we should set a shorter timeout on these jobs, which only process a single object.
>
> Also a quick note on retries: our cluster is set not to retry failed tasks. We basically need that to be true at the moment, with all the tasks we have running on a short loop. We can't configure this at a task level, so if this fails once, it won't retry. I don't think that is a huge issue, but it's worth being aware of. Once we have fewer scheduled tasks running frequently, we can look at changing that. As it stands, this is no worse than what we do now.

> **Member:** this still stands
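Following up on that comment: recent versions of django-q/django-q2 accept a per-task `timeout` option, and each `async_chain` entry may carry a kwargs dict in which task options can be passed via `q_options`. A minimal sketch of what the reviewer is asking for, assuming that django-q version; the 60-second value is an illustrative guess, not something specified in this PR:

```python
# Sketch only: per-task timeouts for these single-object jobs. Assumes a
# django-q/django-q2 version where async_task() understands
# q_options={"timeout": ...}. 60s is illustrative, well under the 240s
# cluster default mentioned in the review comment.
def start_image_processing(self):  # method on QueuedImage, mirroring the diff
    from django_q.tasks import async_chain

    async_chain(
        [
            (
                "moderation_queue.tasks.normalise_queued_image",
                (self.id,),
                {"q_options": {"timeout": 60}},
            ),
            (
                "moderation_queue.tasks.detect_faces_for_queued_image",
                (self.id,),
                {"q_options": {"timeout": 60}},
            ),
        ]
    )
```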
```diff
    def normalise_image(self):
        pil_img = PillowImage.open(self.image.file)
        pil_img = ImageOps.exif_transpose(pil_img)
        png_buffer = convert_image_to_png(pil_img)
        self.image.save(self.image.name, png_buffer, save=False)
        sorl.thumbnail.delete(self.image.name, delete_file=False)
```

> **Member** (on the `sorl.thumbnail.delete` call): Here where we clear the thumbnail cache, …
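For context: `ImageOps.exif_transpose` bakes any EXIF orientation tag into the pixel data, and `sorl.thumbnail.delete(name, delete_file=False)` invalidates sorl's cached thumbnails in its key-value store without touching the source file. The `convert_image_to_png` helper is imported from `.helpers` but its body is not part of this diff, so here is only a hypothetical sketch of what such a helper might look like:

```python
# Hypothetical sketch of convert_image_to_png; the real helper presumably
# lives in moderation_queue/helpers.py and is not shown in this diff, so the
# details below are assumptions about its behaviour.
from io import BytesIO

from django.core.files.base import ContentFile
from PIL import Image as PillowImage


def convert_image_to_png(pil_img: PillowImage.Image) -> ContentFile:
    # PNG has no CMYK mode, so normalise anything exotic to RGBA first
    if pil_img.mode not in ("RGB", "RGBA", "L", "LA", "P"):
        pil_img = pil_img.convert("RGBA")
    buffer = BytesIO()
    pil_img.save(buffer, format="PNG")
    # ContentFile lets ImageField.save() consume the in-memory bytes
    return ContentFile(buffer.getvalue())
```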
```diff
    def _face_crop_bound(self, bound, im_size, scaling_factor):
        return max(0, bound * im_size * scaling_factor)

    def _apply_face_detection(self, detected):
        if not (detected and detected.get("FaceDetails")):
            return
        # AWS crops faces tightly by default. These scaling factors give a
        # slightly wider crop that includes more context around the face.
        MIN_SCALING_FACTOR = 0.7
        MAX_SCALING_FACTOR = 1.3
        bb = detected["FaceDetails"][0]["BoundingBox"]
        self.crop_min_x = self._face_crop_bound(
            bb["Left"], self.image.width, MIN_SCALING_FACTOR
        )
        self.crop_min_y = self._face_crop_bound(
            bb["Top"], self.image.height, MIN_SCALING_FACTOR
        )
        self.crop_max_x = self._face_crop_bound(
            bb["Width"], self.image.width, MAX_SCALING_FACTOR
        )
        self.crop_max_y = self._face_crop_bound(
            bb["Height"], self.image.height, MAX_SCALING_FACTOR
        )
        self.detection_metadata = json.dumps(detected, indent=4)
```
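Rekognition reports `BoundingBox` values as ratios of the image dimensions, so `_face_crop_bound` converts each ratio to pixels and then widens (or tightens) it by the scaling factor. A worked example of the method as written, with purely illustrative numbers:

```python
# Illustrative numbers only: a 600x400 image where Rekognition reports
# BoundingBox ratios Left=0.25, Top=0.25, Width=0.5, Height=0.5.
bb = {"Left": 0.25, "Top": 0.25, "Width": 0.5, "Height": 0.5}

def face_crop_bound(bound, im_size, scaling_factor):
    return max(0, bound * im_size * scaling_factor)

crop_min_x = face_crop_bound(bb["Left"], 600, 0.7)    # 105.0 (raw edge: 150)
crop_min_y = face_crop_bound(bb["Top"], 400, 0.7)     # 70.0  (raw edge: 100)
crop_max_x = face_crop_bound(bb["Width"], 600, 1.3)   # 390.0 (raw: 300)
crop_max_y = face_crop_bound(bb["Height"], 400, 1.3)  # 260.0 (raw: 200)
```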
```diff
    def detect_faces(self):
        import boto3

        try:
            from storages.backends.s3 import S3Storage
        except ImportError:
            S3Storage = None

        try:
            rekognition = boto3.client("rekognition", region_name="eu-west-1")
            storage = self.image.storage
            if S3Storage and isinstance(storage, S3Storage):
                rekognition_image = {
                    "S3Object": {
                        "Bucket": storage.bucket_name,
                        "Name": storage._normalize_name(self.image.name),
                    }
                }
            else:
                with self.image.open("rb") as f:
                    rekognition_image = {"Bytes": f.read()}
            detected = rekognition.detect_faces(
                Image=rekognition_image, Attributes=["ALL"]
            )
            self._apply_face_detection(detected)
        finally:
            self.face_detection_tried = True
            self.rotation_tried = True
            self.save()
```

*chris48s marked a conversation on this method as resolved. (Outdated)*
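When the image lives on S3, the code hands Rekognition an `S3Object` reference so AWS reads it server-side; otherwise the raw bytes are sent inline. Note the `finally` block: the `*_tried` flags are saved even if the Rekognition call fails, presumably so a failing image is not reprocessed indefinitely. For reference, the shape of the response that `_apply_face_detection` consumes, heavily abbreviated since `Attributes=["ALL"]` returns many more fields:

```python
# Abbreviated detect_faces response. BoundingBox values are ratios of the
# image dimensions, not pixels; ALL also returns landmarks, pose, emotions,
# quality scores, and so on.
detected = {
    "FaceDetails": [
        {
            "BoundingBox": {
                "Width": 0.5,
                "Height": 0.5,
                "Left": 0.25,
                "Top": 0.25,
            },
            "Confidence": 99.9,
        }
    ]
}
```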
```diff
    def crop_image(self):
        """
        Returns a temporary file containing the cropped image
```
moderation_queue/tasks.py

```diff
@@ -0,0 +1,11 @@
from moderation_queue.models import QueuedImage


def normalise_queued_image(queued_image_id):
    qi = QueuedImage.objects.get(pk=queued_image_id)
    qi.normalise_image()


def detect_faces_for_queued_image(queued_image_id):
    qi = QueuedImage.objects.get(pk=queued_image_id)
    qi.detect_faces()
```
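Because django-q takes dotted-path task references, these functions can also be exercised without a running cluster. A sketch, where the `QueuedImage` lookup is illustrative:

```python
# Sketch: running the new tasks eagerly, e.g. from a test or a shell.
from django_q.tasks import async_chain

from moderation_queue.models import QueuedImage
from moderation_queue.tasks import normalise_queued_image

qi = QueuedImage.objects.first()  # illustrative lookup

# Call the task function directly, bypassing the broker entirely...
normalise_queued_image(qi.id)

# ...or run the whole chain in-process; async_chain accepts sync=True.
async_chain(
    [
        ("moderation_queue.tasks.normalise_queued_image", (qi.id,)),
        ("moderation_queue.tasks.detect_faces_for_queued_image", (qi.id,)),
    ],
    sync=True,
)
```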