
phase2

master
first_user 5 years ago
parent commit ab1cf557b5
  1. .gitignore (2 lines changed)
  2. detect_app/detect.py (50 lines changed)
  3. download.sh (4 lines changed)
  4. requirements.txt (12 lines changed)

.gitignore (2 lines changed)

@@ -3,3 +3,5 @@ db.sqlite3
*.pyc
detect_app/static/detect_app/images/input.jpg
detect_app/static/detect_app/images/output.jpg
model/frozen_inference_graph.pb
model/graph.pbtxt

detect_app/detect.py (50 lines changed)

@@ -1,4 +1,50 @@
import shutil
import cv2


def detect(input, output):
    shutil.copy2(input, output)
    classes = {0: 'background',
               1: 'person', 2: 'bicycle', 3: 'car', 4: 'motorcycle', 5: 'airplane', 6: 'bus',
               7: 'train', 8: 'truck', 9: 'boat', 10: 'traffic light', 11: 'fire hydrant',
               13: 'stop sign', 14: 'parking meter', 15: 'bench', 16: 'bird', 17: 'cat',
               18: 'dog', 19: 'horse', 20: 'sheep', 21: 'cow', 22: 'elephant', 23: 'bear',
               24: 'zebra', 25: 'giraffe', 27: 'backpack', 28: 'umbrella', 31: 'handbag',
               32: 'tie', 33: 'suitcase', 34: 'frisbee', 35: 'skis', 36: 'snowboard',
               37: 'sports ball', 38: 'kite', 39: 'baseball bat', 40: 'baseball glove',
               41: 'skateboard', 42: 'surfboard', 43: 'tennis racket', 44: 'bottle',
               46: 'wine glass', 47: 'cup', 48: 'fork', 49: 'knife', 50: 'spoon',
               51: 'bowl', 52: 'banana', 53: 'apple', 54: 'sandwich', 55: 'orange',
               56: 'broccoli', 57: 'carrot', 58: 'hot dog', 59: 'pizza', 60: 'donut',
               61: 'cake', 62: 'chair', 63: 'couch', 64: 'potted plant', 65: 'bed',
               67: 'dining table', 70: 'toilet', 72: 'tv', 73: 'laptop', 74: 'mouse',
               75: 'remote', 76: 'keyboard', 77: 'cell phone', 78: 'microwave', 79: 'oven',
               80: 'toaster', 81: 'sink', 82: 'refrigerator', 84: 'book', 85: 'clock',
               86: 'vase', 87: 'scissors', 88: 'teddy bear', 89: 'hair drier', 90: 'toothbrush'}

    # Load a model imported from Tensorflow
    tensorflowNet = cv2.dnn.readNetFromTensorflow('./model/frozen_inference_graph.pb', './model/graph.pbtxt')

    # Input image
    img = cv2.imread(input)
    rows, cols, channels = img.shape

    # Use the given image as input, which needs to be blob(s).
    tensorflowNet.setInput(cv2.dnn.blobFromImage(img, size=(300, 300), swapRB=True, crop=False))

    # Run a forward pass to compute the net output
    networkOutput = tensorflowNet.forward()

    # Loop over the detections
    for detection in networkOutput[0, 0]:
        score = float(detection[2])
        if score > 0.2:
            left = detection[3] * cols
            top = detection[4] * rows
            right = detection[5] * cols
            bottom = detection[6] * rows

            # Draw a red rectangle around each detected object
            cv2.rectangle(img, (int(left), int(top)), (int(right), int(bottom)), (0, 0, 255), thickness=2)
            # Draw the category name at the top left of the rectangle
            cv2.putText(img, classes[int(detection[1])], (int(left), int(top - 4)), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 255), 2, 8)

    # Write the image with rectangles surrounding the detected objects
    cv2.imwrite(output, img)
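
For context, a minimal sketch of how detect() might be wired into the Django app. The view function, template name, and the image paths (which mirror the entries ignored in .gitignore above) are assumptions for illustration, not part of this commit:

# detect_app/views.py (hypothetical caller, not part of this commit)
import os

from django.conf import settings
from django.shortcuts import render

from .detect import detect


def run_detection(request):
    images = os.path.join(settings.BASE_DIR, 'detect_app', 'static', 'detect_app', 'images')
    # Run detection on the input image and write the annotated result next to it
    detect(os.path.join(images, 'input.jpg'), os.path.join(images, 'output.jpg'))
    return render(request, 'detect_app/index.html')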

download.sh (4 lines changed)

@@ -0,0 +1,4 @@
#!/bin/sh
mkdir -p model
wget -O - 'http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v2_coco_2018_03_29.tar.gz' | tar xvfzO - ssd_mobilenet_v2_coco_2018_03_29/frozen_inference_graph.pb > model/frozen_inference_graph.pb
wget -O - 'https://raw.githubusercontent.com/opencv/opencv_extra/master/testdata/dnn/ssd_mobilenet_v2_coco_2018_03_29.pbtxt' > model/graph.pbtxt
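
As a sanity check, a minimal sketch (assuming it is run from the repository root after ./download.sh) that verifies the downloaded graph files load with OpenCV's DNN module:

# check_model.py (hypothetical helper, not part of this commit)
import cv2

# Fails with an OpenCV error if either file is missing or corrupted
net = cv2.dnn.readNetFromTensorflow('./model/frozen_inference_graph.pb', './model/graph.pbtxt')
print('Loaded', len(net.getLayerNames()), 'layers')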

requirements.txt (12 lines changed)

@@ -1,5 +1,17 @@
asgiref==3.2.3
astroid==2.3.3
autopep8==1.5
Django==3.0.3
isort==4.3.21
lazy-object-proxy==1.4.3
mccabe==0.6.1
numpy==1.18.1
opencv-python==4.2.0.32
pkg-resources==0.0.0
pycodestyle==2.5.0
pylint==2.4.4
pytz==2019.3
six==1.14.0
sqlparse==0.3.0
typed-ast==1.4.1
wrapt==1.11.2
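
A minimal sketch for confirming the core runtime dependencies after pip install -r requirements.txt. Note that pkg-resources==0.0.0 is a common pip freeze artifact on Debian/Ubuntu systems and may need to be removed before the install succeeds:

# check_env.py (hypothetical helper, not part of this commit)
import cv2
import django
import numpy

# Print the installed versions of the core runtime dependencies pinned above
print('Django', django.get_version())
print('numpy', numpy.__version__)
print('opencv-python', cv2.__version__)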