import cv2


def detect(input, output):
    classes = {0: 'background',
               1: 'person', 2: 'bicycle', 3: 'car', 4: 'motorcycle', 5: 'airplane', 6: 'bus',
               7: 'train', 8: 'truck', 9: 'boat', 10: 'traffic light', 11: 'fire hydrant',
               13: 'stop sign', 14: 'parking meter', 15: 'bench', 16: 'bird', 17: 'cat',
               18: 'dog', 19: 'horse', 20: 'sheep', 21: 'cow', 22: 'elephant', 23: 'bear',
               24: 'zebra', 25: 'giraffe', 27: 'backpack', 28: 'umbrella', 31: 'handbag',
               32: 'tie', 33: 'suitcase', 34: 'frisbee', 35: 'skis', 36: 'snowboard',
               37: 'sports ball', 38: 'kite', 39: 'baseball bat', 40: 'baseball glove',
               41: 'skateboard', 42: 'surfboard', 43: 'tennis racket', 44: 'bottle',
               46: 'wine glass', 47: 'cup', 48: 'fork', 49: 'knife', 50: 'spoon',
               51: 'bowl', 52: 'banana', 53: 'apple', 54: 'sandwich', 55: 'orange',
               56: 'broccoli', 57: 'carrot', 58: 'hot dog', 59: 'pizza', 60: 'donut',
               61: 'cake', 62: 'chair', 63: 'couch', 64: 'potted plant', 65: 'bed',
               67: 'dining table', 70: 'toilet', 72: 'tv', 73: 'laptop', 74: 'mouse',
               75: 'remote', 76: 'keyboard', 77: 'cell phone', 78: 'microwave', 79: 'oven',
               80: 'toaster', 81: 'sink', 82: 'refrigerator', 84: 'book', 85: 'clock',
               86: 'vase', 87: 'scissors', 88: 'teddy bear', 89: 'hair drier', 90: 'toothbrush'}
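    # The IDs above follow the 90-class COCO label map used by the TensorFlow Object
    # Detection API; the gaps in the numbering (12, 26, 29, 30, ...) are IDs that the
    # label map leaves unassigned.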

    # Load a model imported from TensorFlow
    tensorflowNet = cv2.dnn.readNetFromTensorflow('./model/frozen_inference_graph.pb', './model/graph.pbtxt')
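    # Note (assumption, not stated in the code): the .pb/.pbtxt pair looks like a TensorFlow
    # Object Detection API export (the 300x300 blob below suggests an SSD-style detector),
    # with graph.pbtxt being the OpenCV-compatible text graph generated for that model.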

    # Input image
    img = cv2.imread(input)
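    # Illustrative guard (not in the original code): cv2.imread returns None when the file
    # cannot be read, and the .shape access below would then raise a confusing AttributeError.
    if img is None:
        raise FileNotFoundError('could not read input image: {}'.format(input))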
    rows, cols, channels = img.shape

    # Use the given image as input; the network expects a blob rather than a raw image array
    tensorflowNet.setInput(cv2.dnn.blobFromImage(img, size=(300, 300), swapRB=True, crop=False))
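    # blobFromImage resizes the frame to the 300x300 input expected by the detector and, with
    # swapRB=True, swaps the B and R channels, since OpenCV loads images as BGR while
    # TensorFlow models are typically trained on RGB input.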

    # Run a forward pass to compute the network output
    networkOutput = tensorflowNet.forward()
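    # The output has shape [1, 1, N, 7]: one row per candidate detection, laid out as
    # [image_id, class_id, confidence, left, top, right, bottom], with the box coordinates
    # normalized to [0, 1] (hence the scaling by cols and rows below).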

    # Loop over the outputs
    for detection in networkOutput[0, 0]:
        score = float(detection[2])
        if score > 0.2:
            left = detection[3] * cols
            top = detection[4] * rows
            right = detection[5] * cols
            bottom = detection[6] * rows
            # Draw a red rectangle around detected objects
            cv2.rectangle(img, (int(left), int(top)), (int(right), int(bottom)), (0, 0, 255), thickness=2)
            # Draw the category name in the top left of the rectangle
            cv2.putText(img, classes[int(detection[1])], (int(left), int(top - 4)), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2, 8)

    # Save the result image with a rectangle surrounding each detected object
    cv2.imwrite(output, img)
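

# Minimal usage sketch (illustrative; 'street.jpg' and 'street_detected.jpg' are placeholder
# paths, not files referenced by the original code):
if __name__ == '__main__':
    detect('street.jpg', 'street_detected.jpg')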