|
import base64
import io
import logging
import os
from typing import Any, Dict, List, Union

import cv2
import numpy as np
import requests
from fastapi import FastAPI
from PIL import Image
|
|
|
|
|
# FastAPI application instance; the /generate route below is registered on it.
app = FastAPI()
|
|
|
|
|
|
|
|
|
# Paths to the Caffe-format face detector expected in ./face_detector/:
# the network architecture (deploy.prototxt) and the pre-trained SSD weights.
prototxtPath = os.path.sep.join(["face_detector", "deploy.prototxt"])
weightsPath = os.path.sep.join(["face_detector",
    "res10_300x300_ssd_iter_140000.caffemodel"])
# Module-level OpenCV DNN network, loaded once and reused by every request.
net = cv2.dnn.readNet(prototxtPath, weightsPath)
|
|
|
|
|
|
|
|
|
|
|
# Anonymization settings consumed by generate():
#   "method":     "simple" selects Gaussian blur; any other value pixelates.
#   "blocks":     grid dimension used by the pixelation method.
#   "confidence": minimum detection score for a face to be anonymized.
args = {
    "method": "simple",
    "blocks": 20,
    "confidence": 0.5
}
|
def anonymize_face_simple(image, factor=3.0):
    """Blur an image with a Gaussian kernel sized relative to its dimensions.

    Parameters
    ----------
    image : numpy.ndarray
        Image (e.g. a BGR face ROI) to blur.
    factor : float, optional
        Blur-strength divisor: each kernel dimension is roughly
        ``dimension / factor``, so smaller factors blur more.

    Returns
    -------
    numpy.ndarray
        A new blurred image; the input is not modified.
    """
    (h, w) = image.shape[:2]

    # Derive the kernel size from the image dimensions.
    kW = int(w / factor)
    kH = int(h / factor)

    # cv2.GaussianBlur requires odd kernel dimensions.
    if kW % 2 == 0:
        kW -= 1
    if kH % 2 == 0:
        kH -= 1

    # BUGFIX: for ROIs smaller than `factor`, int(dim / factor) is 0 and the
    # even-check above drives it to -1, which makes cv2.GaussianBlur raise.
    # Clamp to the minimal valid odd kernel size of 1.
    kW = max(kW, 1)
    kH = max(kH, 1)

    return cv2.GaussianBlur(image, (kW, kH), 0)
|
|
|
def anonymize_face_pixelate(image, blocks=3):
    """Pixelate an image in place over a ``blocks`` x ``blocks`` grid.

    Each grid cell is painted with its mean BGR colour, producing the
    classic mosaic effect. Returns the (mutated) input image.
    """
    height, width = image.shape[:2]

    # Grid edges along each axis; consecutive pairs delimit one cell.
    xEdges = np.linspace(0, width, blocks + 1, dtype="int")
    yEdges = np.linspace(0, height, blocks + 1, dtype="int")

    for y0, y1 in zip(yEdges[:-1], yEdges[1:]):
        for x0, x1 in zip(xEdges[:-1], xEdges[1:]):
            # Average colour of the cell, truncated to ints per channel.
            cell = image[y0:y1, x0:x1]
            (b, g, r) = [int(c) for c in cv2.mean(cell)[:3]]
            # Flood the cell with its mean colour (thickness -1 = filled).
            cv2.rectangle(image, (x0, y0), (x1, y1), (b, g, r), -1)

    return image
|
|
|
@app.get("/generate")
def generate(path: str):
    """Fetch an image from the URL *path*, anonymize every detected face,
    and return the result as a base64-encoded PNG.

    Faces are located with the module-level OpenCV DNN detector ``net``;
    each detection scoring above ``args["confidence"]`` is blurred when
    ``args["method"] == "simple"``, otherwise pixelated with
    ``args["blocks"]`` blocks.

    Returns a JSON object of the form ``{"output": "<base64 PNG>"}``.
    """
    # BUGFIX: the original referenced an undefined `logger` (NameError on
    # every request); use a properly obtained module logger instead.
    log = logging.getLogger(__name__)

    # Download the image (bounded by a timeout so a dead URL cannot hang
    # the worker) and convert PIL RGB -> OpenCV BGR.
    r = requests.get(path, stream=True, timeout=30)
    img = Image.open(io.BytesIO(r.content)).convert('RGB')
    image = np.array(img)[:, :, ::-1].copy()
    (h, w) = image.shape[:2]

    # Run the SSD face detector on a 300x300 mean-subtracted blob.
    blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300),
                                 (104.0, 177.0, 123.0))
    log.info("computing face detections...")
    net.setInput(blob)
    detections = net.forward()

    for i in range(0, detections.shape[2]):
        confidence = detections[0, 0, i, 2]
        if confidence <= args["confidence"]:
            continue

        # Scale the normalized box back to pixel coordinates.
        box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])
        (startX, startY, endX, endY) = box.astype("int")

        # BUGFIX: clamp the box to the frame — detections can extend past
        # the image edges, yielding empty/invalid slices that crash the
        # anonymizers. Skip degenerate boxes entirely.
        startX, startY = max(0, startX), max(0, startY)
        endX, endY = min(w, endX), min(h, endY)
        if endX <= startX or endY <= startY:
            continue

        face = image[startY:endY, startX:endX]
        if args["method"] == "simple":
            face = anonymize_face_simple(face, factor=3.0)
        else:
            face = anonymize_face_pixelate(face, blocks=args["blocks"])
        image[startY:endY, startX:endX] = face

    # Encode the anonymized image (BGR -> RGB) as PNG, then base64 for JSON.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    out_img = Image.fromarray(image)
    buf = io.BytesIO()
    out_img.save(buf, format="PNG")
    im_bytes = base64.b64encode(buf.getvalue()).decode("utf-8")

    return {"output": im_bytes}
|
|
|
|