@@ -6,6 +6,7 @@
 parser = argparse.ArgumentParser()
 parser.add_argument('--webcam', help="True/False", default=False)
 parser.add_argument('--play_video', help="True/False", default=False)
+parser.add_argument('--play_flask', help="True/False", default=False)
 parser.add_argument('--image', help="True/False", default=False)
 parser.add_argument('--video_path', help="Path of video file", default="videos/car_on_road.mp4")
 parser.add_argument('--image_path', help="Path of image to detect objects", default="Images/bicycle.jpg")
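One note on these flags before the rest of the diff: none of the add_argument calls above pass type= or action='store_true', so argparse stores whatever string follows the option, and any non-empty string is truthy. A minimal sketch of that behaviour (the parser line mirrors the one added in this hunk; the rest is plain standard-library usage):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--play_flask', help="True/False", default=False)
    args = parser.parse_args(['--play_flask', 'False'])

    print(repr(args.play_flask))  # 'False' -- a string, not a bool
    print(bool(args.play_flask))  # True, so the Flask branch below would still run

In practice this means passing any value after --play_flask enables the Flask mode checked at the bottom of the diff; only omitting the flag keeps the default of False.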
@@ -82,7 +83,20 @@
             color = colors[i]
             cv2.rectangle(img, (x,y), (x+w, y+h), color, 2)
             cv2.putText(img, label, (x, y - 5), font, 3, color, 3)
-    cv2.imshow("Image", img)
+    imS = cv2.resize(img, (960, 540))
+    cv2.imshow("Image", imS)
+
+def draw_labels2(boxes, confs, colors, class_ids, classes, img):
+    indexes = cv2.dnn.NMSBoxes(boxes, confs, 0.4, 0.4)
+    font = cv2.FONT_HERSHEY_PLAIN
+    for i in range(len(boxes)):
+        if i in indexes:
+            x, y, w, h = boxes[i]
+            label = str(classes[class_ids[i]])
+            color = colors[i]
+            cv2.rectangle(img, (x,y), (x+w, y+h), color, 2)
+            cv2.putText(img, label, (x, y - 5), font, 3, color, 3)
+    return img

 def image_detect(img_path):
     model, classes, colors, output_layers = load_yolo()
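Since draw_labels2 filters detections through cv2.dnn.NMSBoxes before drawing, a small standalone sketch of that call may help. The boxes below are made-up values in the same [x, y, w, h] form that get_box_dimensions returns; only the 0.4/0.4 thresholds are taken from the hunk above:

    import cv2

    boxes = [[100, 100, 50, 80], [105, 102, 52, 78], [300, 200, 60, 60]]
    confs = [0.9, 0.6, 0.8]

    # score_threshold=0.4 drops weak detections, nms_threshold=0.4 drops overlaps
    indexes = cv2.dnn.NMSBoxes(boxes, confs, 0.4, 0.4)
    print(indexes)  # boxes 0 and 2 survive; the overlapping, lower-confidence box 1 is suppressed
    # (the exact shape of the returned index array varies between OpenCV versions)

Unlike draw_labels, which calls cv2.imshow itself, draw_labels2 returns the annotated image so that the Flask streaming code added further down can JPEG-encode it.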
@@ -119,17 +133,57 @@
         blob, outputs = detect_objects(frame, model, output_layers)
         boxes, confs, class_ids = get_box_dimensions(outputs, height, width)
         draw_labels(boxes, confs, colors, class_ids, classes, frame)
-        k = cv2.waitKey(1) & 0xFF
+        k = cv2.waitKey(50) & 0xFF
         if k == 27:
             cv2.destroyAllWindows()
             break
     cap.release()


+from flask import Flask, render_template, Response
+
+app = Flask(__name__)
+def find_camera(id):
+    '''
+    cameras = ['rtsp://username:password@ip_address:554/user=username_password='password'_channel=channel_number_stream=0.sdp',
+    'rtsp://username:password@ip_address:554/user=username_password='password'_channel=channel_number_stream=0.sdp']
+    '''
+    cameras = ['rtsp://admin:@Unv123456@192.168.10.252:554/unicast/c1/s1/live']
+    return cameras[int(id)]
+# for a CCTV camera, use 'rtsp://username:password@ip_address:554/user=username_password='password'_channel=channel_number_stream=0.sdp' instead of the camera index
+# for a webcam, use zero (0)
+def gen_frames(camera_id):
+
+    cam = find_camera(camera_id)
+    video_path = cam
+    model, classes, colors, output_layers = load_yolo()
+    cap = cv2.VideoCapture(video_path)
+    while True:
+        _, frame = cap.read()
+        if frame is not None:
+            height, width, channels = frame.shape
+            blob, outputs = detect_objects(frame, model, output_layers)
+            boxes, confs, class_ids = get_box_dimensions(outputs, height, width)
+            frame = draw_labels2(boxes, confs, colors, class_ids, classes, frame)
+            ret, buffer = cv2.imencode('.jpg', frame)
+            frame = buffer.tobytes()
+            yield (b'--frame\r\n'
+                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')  # stream each encoded frame as one part of the multipart response
+    cap.release()
+
+
+@app.route('/video_feed/<string:id>/', methods=["GET"])
+def video_feed(id):
+
+    """Video streaming route. Put this in the src attribute of an img tag."""
+    return Response(gen_frames(id),
+                    mimetype='multipart/x-mixed-replace; boundary=frame')
+

 if __name__ == '__main__':
     webcam = args.webcam
     video_play = args.play_video
+    flask_play = args.play_flask
     image = args.image
     if webcam:
         if args.verbose:
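The hunk above imports render_template but adds no page that embeds the stream, so an index route and template presumably exist elsewhere in the project. A hypothetical minimal version, only to show how the /video_feed/<id>/ route is meant to be consumed (the route, the template name, and its contents are assumptions, not part of the diff):

    # Hypothetical companion route: templates/index.html would embed the stream
    # roughly as <img src="/video_feed/0/">, which is what the video_feed
    # docstring suggests.
    @app.route('/')
    def index():
        return render_template('index.html')

Note that each client that opens the stream gets its own gen_frames generator, and therefore its own cv2.VideoCapture and its own call to load_yolo, so every concurrent viewer costs a separate RTSP connection and model instance.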
@@ -146,5 +200,6 @@
         print("Opening "+image_path+" .... ")
         image_detect(image_path)

-
+    if flask_play:
+        app.run(debug=True, port=9024)
     cv2.destroyAllWindows()
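With the diff applied, running the script with the --play_flask flag set starts the Flask app on port 9024. A quick way to smoke-test the new endpoint without a browser, assuming the app is running locally, the camera behind id 0 (the single entry in find_camera) is reachable, and the third-party requests package is installed; none of that is part of the diff:

    import requests

    # The route and port come from the diff; the response headers should carry
    # the multipart MJPEG mimetype set by video_feed.
    resp = requests.get('http://127.0.0.1:9024/video_feed/0/', stream=True)
    print(resp.headers['Content-Type'])  # multipart/x-mixed-replace; boundary=frame
    resp.close()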