Tum 3 vuotta sitten
commit
84fcb913c1
100 muutettua tiedostoa jossa 531 lisäystä ja 0 poistoa
  1. BIN
      .DS_Store
  2. BIN
      backgrounds/4223_bg.jpg
  3. BIN
      backgrounds/625_bg.jpg
  4. BIN
      backgrounds/6622_bg.jpg
  5. BIN
      backgrounds/815_bg.jpg
  6. 531 0
      blobDetection.py
  7. BIN
      cap/frame0.jpg
  8. BIN
      cap/frame1.jpg
  9. BIN
      cap/frame10.jpg
  10. BIN
      cap/frame100.jpg
  11. BIN
      cap/frame101.jpg
  12. BIN
      cap/frame102.jpg
  13. BIN
      cap/frame103.jpg
  14. BIN
      cap/frame104.jpg
  15. BIN
      cap/frame105.jpg
  16. BIN
      cap/frame106.jpg
  17. BIN
      cap/frame107.jpg
  18. BIN
      cap/frame108.jpg
  19. BIN
      cap/frame109.jpg
  20. BIN
      cap/frame11.jpg
  21. BIN
      cap/frame110.jpg
  22. BIN
      cap/frame111.jpg
  23. BIN
      cap/frame112.jpg
  24. BIN
      cap/frame113.jpg
  25. BIN
      cap/frame114.jpg
  26. BIN
      cap/frame115.jpg
  27. BIN
      cap/frame116.jpg
  28. BIN
      cap/frame117.jpg
  29. BIN
      cap/frame118.jpg
  30. BIN
      cap/frame119.jpg
  31. BIN
      cap/frame12.jpg
  32. BIN
      cap/frame120.jpg
  33. BIN
      cap/frame121.jpg
  34. BIN
      cap/frame122.jpg
  35. BIN
      cap/frame123.jpg
  36. BIN
      cap/frame124.jpg
  37. BIN
      cap/frame125.jpg
  38. BIN
      cap/frame126.jpg
  39. BIN
      cap/frame127.jpg
  40. BIN
      cap/frame128.jpg
  41. BIN
      cap/frame129.jpg
  42. BIN
      cap/frame13.jpg
  43. BIN
      cap/frame130.jpg
  44. BIN
      cap/frame131.jpg
  45. BIN
      cap/frame132.jpg
  46. BIN
      cap/frame133.jpg
  47. BIN
      cap/frame134.jpg
  48. BIN
      cap/frame135.jpg
  49. BIN
      cap/frame136.jpg
  50. BIN
      cap/frame137.jpg
  51. BIN
      cap/frame138.jpg
  52. BIN
      cap/frame139.jpg
  53. BIN
      cap/frame14.jpg
  54. BIN
      cap/frame140.jpg
  55. BIN
      cap/frame141.jpg
  56. BIN
      cap/frame142.jpg
  57. BIN
      cap/frame143.jpg
  58. BIN
      cap/frame144.jpg
  59. BIN
      cap/frame145.jpg
  60. BIN
      cap/frame146.jpg
  61. BIN
      cap/frame147.jpg
  62. BIN
      cap/frame148.jpg
  63. BIN
      cap/frame149.jpg
  64. BIN
      cap/frame15.jpg
  65. BIN
      cap/frame150.jpg
  66. BIN
      cap/frame151.jpg
  67. BIN
      cap/frame152.jpg
  68. BIN
      cap/frame153.jpg
  69. BIN
      cap/frame154.jpg
  70. BIN
      cap/frame155.jpg
  71. BIN
      cap/frame156.jpg
  72. BIN
      cap/frame157.jpg
  73. BIN
      cap/frame158.jpg
  74. BIN
      cap/frame159.jpg
  75. BIN
      cap/frame16.jpg
  76. BIN
      cap/frame160.jpg
  77. BIN
      cap/frame161.jpg
  78. BIN
      cap/frame162.jpg
  79. BIN
      cap/frame163.jpg
  80. BIN
      cap/frame164.jpg
  81. BIN
      cap/frame165.jpg
  82. BIN
      cap/frame166.jpg
  83. BIN
      cap/frame167.jpg
  84. BIN
      cap/frame168.jpg
  85. BIN
      cap/frame169.jpg
  86. BIN
      cap/frame17.jpg
  87. BIN
      cap/frame170.jpg
  88. BIN
      cap/frame171.jpg
  89. BIN
      cap/frame172.jpg
  90. BIN
      cap/frame173.jpg
  91. BIN
      cap/frame174.jpg
  92. BIN
      cap/frame175.jpg
  93. BIN
      cap/frame176.jpg
  94. BIN
      cap/frame177.jpg
  95. BIN
      cap/frame178.jpg
  96. BIN
      cap/frame179.jpg
  97. BIN
      cap/frame18.jpg
  98. BIN
      cap/frame180.jpg
  99. BIN
      cap/frame181.jpg
  100. 0 0
      cap/frame182.jpg

BIN
.DS_Store


BIN
backgrounds/4223_bg.jpg


BIN
backgrounds/625_bg.jpg


BIN
backgrounds/6622_bg.jpg


BIN
backgrounds/815_bg.jpg


+ 531 - 0
blobDetection.py

@@ -0,0 +1,531 @@
1
+#!/usr/bin/env python3
2
+# -*- coding: utf-8 -*-
3
+"""
4
+Created on Sat Sep 30 11:51:00 2017
5
+
6
+@author: alexdrake
7
+"""
8
+
9
+import cv2
10
+import numpy as np
11
+import time
12
+import logging
13
+import math
14
+import re
15
+from os import walk
16
+import os
17
+
18
+# Vehicle_counter from Dan Maesks response on
19
+# https://stackoverflow.com/questions/36254452/counting-cars-opencv-python-issue/36274515#36274515
20
+
21
# get working directory
loc = os.path.abspath('')

# Video source: a local capture file. The commented-out line is the live RTSP
# alternative for the same camera.
inputFile = loc+'/inputs/625_201709280946.mp4'
#inputFile = 'rtsp://admin:@Unv123456@192.168.10.252:554/unicast/c1/s1/live'
# for testing
# Module-level shared state, written by process_video() / VehicleCounter:
tracked_blobs = []  # per-match motion-vector records (VehicleCounter.update_vehicle)
tracked_conts = []  # per-frame contour records (process_video)
t_retval = []       # running history of Otsu threshold estimates
frame_no = 0        # current frame index, updated by process_video()
frame_w = 0         # frame width in pixels, set by process_video()
33
+
34
+# ============================================================================
35
+
36
class Vehicle(object):
    """A single tracked vehicle: its id, centroid history and counting state."""

    def __init__(self, id, position):
        self.id = id
        self.positions = [position]   # centroid history, newest last
        self.frames_since_seen = 0    # frames elapsed since the last match
        self.frames_seen = 0          # frames in which this vehicle was matched
        self.counted = False          # True once counted past the divider
        self.vehicle_dir = 0          # +1 moving down the image, -1 moving up

    @property
    def last_position(self):
        """Most recent centroid."""
        return self.positions[-1]

    @property
    def last_position2(self):
        """Second most recent centroid (valid only once >= 2 positions exist)."""
        return self.positions[-2]

    def add_position(self, new_position):
        """Record a new matched centroid and reset the unseen counter."""
        self.positions.append(new_position)
        self.frames_since_seen = 0
        self.frames_seen += 1

    def draw(self, output_image):
        """Draw this vehicle's track (points plus connecting polyline)."""
        for point in self.positions:
            cv2.circle(output_image, point, 2, (0, 0, 255), -1)
        # Fix: the polyline used to be redrawn inside the loop, once per point
        # (O(n^2) redundant drawing). Drawing it once yields the same image.
        cv2.polylines(output_image, [np.int32(self.positions)],
                      False, (0, 0, 255), 1)
62
+
63
+# ============================================================================
64
+
65
class VehicleCounter(object):
    """Match detected blobs to tracked vehicles and count lane crossings.

    Based on Dan Masek's answer:
    https://stackoverflow.com/questions/36254452/counting-cars-opencv-python-issue/36274515#36274515

    Reads the module globals `frame_no` / `frame_w` and appends diagnostic
    records to the module-level `tracked_blobs` list.
    """

    def __init__(self, shape, divider):
        # shape: (height, width) of the frame; divider: y-coordinate of the
        # horizontal counting line.
        self.log = logging.getLogger("vehicle_counter")

        self.height, self.width = shape
        self.divider = divider

        self.vehicles = []            # currently tracked Vehicle objects
        self.next_vehicle_id = 0      # monotonically increasing id source
        self.vehicle_count = 0        # total counted (LHS + RHS)
        self.vehicle_LHS = 0          # upward traffic, left half of frame
        self.vehicle_RHS = 0          # downward traffic, right half of frame
        self.max_unseen_frames = 10   # drop a vehicle after this many misses


    @staticmethod
    def get_vector(a, b):
        """Calculate vector (distance, angle in degrees) from point a to point b.

        Angle ranges from -180 to 180 degrees.
        Vector with angle 0 points straight down on the image.
        Values decrease in clockwise direction.

        Returns (distance, angle, dx, dy).
        """
        dx = float(b[0] - a[0])
        dy = float(b[1] - a[1])

        distance = math.sqrt(dx**2 + dy**2)

        if dy > 0:
            angle = math.degrees(math.atan(-dx/dy))
        elif dy == 0:
            if dx < 0:
                angle = 90.0
            elif dx > 0:
                angle = -90.0
            else:
                angle = 0.0
        else:
            if dx < 0:
                angle = 180 - math.degrees(math.atan(dx/dy))
            elif dx > 0:
                angle = -180 - math.degrees(math.atan(dx/dy))
            else:
                angle = 180.0

        return distance, angle, dx, dy


    @staticmethod
    def is_valid_vector(a, b):
        # vector is only valid if threshold distance is less than 12
        # and if vector deviation is less than 30 or greater than 330 degs
        # NOTE(review): parameter `b` (the angle deviation) is accepted but
        # never used — only the distance check below is applied, so the
        # angle-deviation rule described above is not actually enforced.
        distance, angle, _, _ = a
        threshold_distance = 12.0
        return (distance <= threshold_distance)


    def update_vehicle(self, vehicle, matches):
        """Try to match `vehicle` to one of `matches`.

        Returns the index of the consumed match, or None if nothing fit.
        Side effect: appends a diagnostic dict to the module-level
        `tracked_blobs` for every candidate match examined.
        """
        # Find if any of the matches fits this vehicle
        for i, match in enumerate(matches):
            contour, centroid = match

            # store the vehicle data
            vector = self.get_vector(vehicle.last_position, centroid)

            # only measure angle deviation if we have enough points
            if vehicle.frames_seen > 2:
                prevVector = self.get_vector(vehicle.last_position2, vehicle.last_position)
                angleDev = abs(prevVector[1]-vector[1])
            else:
                angleDev = 0

            b = dict(
                    id = vehicle.id,
                    center_x = centroid[0],
                    center_y = centroid[1],
                    vector_x = vector[0],
                    vector_y = vector[1],
                    dx = vector[2],
                    dy = vector[3],
                    counted = vehicle.counted,
                    frame_number = frame_no,   # module global set by process_video
                    angle_dev = angleDev
                    )

            tracked_blobs.append(b)

            # check validity
            if self.is_valid_vector(vector, angleDev):
                vehicle.add_position(centroid)
                # NOTE(review): add_position() already incremented frames_seen,
                # so it is incremented twice per match here — the
                # `frames_seen > 6` counting threshold is reached in half the
                # matches its name suggests. Confirm before changing.
                vehicle.frames_seen += 1
                # check vehicle direction
                if vector[3] > 0:
                    # positive value means vehicle is moving DOWN
                    vehicle.vehicle_dir = 1
                elif vector[3] < 0:
                    # negative value means vehicle is moving UP
                    vehicle.vehicle_dir = -1
                self.log.debug("Added match (%d, %d) to vehicle #%d. vector=(%0.2f,%0.2f)"
                    , centroid[0], centroid[1], vehicle.id, vector[0], vector[1])
                return i

        # No matches fit...
        vehicle.frames_since_seen += 1
        self.log.debug("No match for vehicle #%d. frames_since_seen=%d"
            , vehicle.id, vehicle.frames_since_seen)

        return None


    def update_count(self, matches, output_image = None):
        """Consume this frame's blob matches, update counts, and prune.

        `matches` is mutated in place: entries matched to existing vehicles
        are deleted; the remainder spawn new vehicles. If `output_image` is
        given, tracks and lane totals are drawn onto it.
        """
        self.log.debug("Updating count using %d matches...", len(matches))

        # First update all the existing vehicles
        for vehicle in self.vehicles:
            i = self.update_vehicle(vehicle, matches)
            if i is not None:
                del matches[i]

        # Add new vehicles based on the remaining matches
        for match in matches:
            contour, centroid = match
            new_vehicle = Vehicle(self.next_vehicle_id, centroid)
            self.next_vehicle_id += 1
            self.vehicles.append(new_vehicle)
            self.log.debug("Created new vehicle #%d from match (%d, %d)."
                , new_vehicle.id, centroid[0], centroid[1])

        # Count any uncounted vehicles that are past the divider and have been
        # tracked long enough to be trusted (frames_seen > 6).
        for vehicle in self.vehicles:
            if not vehicle.counted and (((vehicle.last_position[1] > self.divider) and (vehicle.vehicle_dir == 1)) or
                                          ((vehicle.last_position[1] < self.divider) and (vehicle.vehicle_dir == -1))) and (vehicle.frames_seen > 6):

                vehicle.counted = True
                # update appropriate counter: RHS counts downward traffic in the
                # right half (with 10 px slack around centre), LHS upward in the
                # left half. frame_w is the module global set by process_video.
                if ((vehicle.last_position[1] > self.divider) and (vehicle.vehicle_dir == 1) and (vehicle.last_position[0] >= (int(frame_w/2)-10))):
                    self.vehicle_RHS += 1
                    self.vehicle_count += 1
                elif ((vehicle.last_position[1] < self.divider) and (vehicle.vehicle_dir == -1) and (vehicle.last_position[0] <= (int(frame_w/2)+10))):
                    self.vehicle_LHS += 1
                    self.vehicle_count += 1

                self.log.debug("Counted vehicle #%d (total count=%d)."
                    , vehicle.id, self.vehicle_count)

        # Optionally draw the vehicles on an image
        if output_image is not None:
            for vehicle in self.vehicles:
                vehicle.draw(output_image)

            # LHS
            cv2.putText(output_image, ("LH Lane: %02d" % self.vehicle_LHS), (12, 56)
                , cv2.FONT_HERSHEY_PLAIN, 1.2, (127,255, 255), 2)
            # RHS
            cv2.putText(output_image, ("RH Lane: %02d" % self.vehicle_RHS), (216, 56)
                , cv2.FONT_HERSHEY_PLAIN, 1.2, (127, 255, 255), 2)

        # Remove vehicles that have not been seen long enough
        removed = [ v.id for v in self.vehicles
            if v.frames_since_seen >= self.max_unseen_frames ]
        self.vehicles[:] = [ v for v in self.vehicles
            if not v.frames_since_seen >= self.max_unseen_frames ]
        for id in removed:
            self.log.debug("Removed vehicle #%d.", id)

        self.log.debug("Count updated, tracking %d vehicles.", len(self.vehicles))
231
+
232
+# ============================================================================
233
+
234
def process_video():
    """Track and count vehicles in `inputFile`, yielding annotated MJPEG frames.

    Generator suitable for a Flask multipart response: each yielded chunk is
    one JPEG-encoded annotated frame. Side effects: writes three output videos
    (annotated, blob threshold, difference image) under outputs/, persists the
    running background average to backgrounds/<camera>_bg.jpg, and updates the
    module globals `frame_no` and `frame_w`.
    """
    global frame_no
    global frame_w

    # Camera id is encoded in the file name, e.g. inputs/625_2017....mp4 -> "625"
    camera = re.match(r".*/(\d+)_.*", inputFile)
    camera = camera.group(1)

    # import video file
    cap = cv2.VideoCapture(inputFile)

    # get list of background files (top level of backgrounds/ only)
    f = []
    for (_, _, filenames) in walk(loc+"/backgrounds/"):
        f.extend(filenames)
        break

    # if background exists for camera: import, else avg will be built on fly
    if camera+"_bg.jpg" in f:
        bg = loc+"/backgrounds/"+camera+"_bg.jpg"
        default_bg = cv2.imread(bg)
        default_bg = cv2.cvtColor(default_bg, cv2.COLOR_BGR2HSV)
        # only the Value channel is used as the background model
        (_, _, default_bg) = cv2.split(default_bg)
        avg = default_bg.copy().astype("float")
    else:
        avg = None

    # get frame size
    frame_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # create a mask (manual for each camera) blanking regions to ignore
    mask = np.zeros((frame_h, frame_w), np.uint8)
    mask[:, :] = 255
    mask[:100, :] = 0
    mask[230:, 160:190] = 0
    mask[170:230, 170:190] = 0
    mask[140:170, 176:190] = 0
    mask[100:140, 176:182] = 0

    # The cutoff for threshold. A lower number means smaller changes between
    # the average and current scene are more readily detected.
    THRESHOLD_SENSITIVITY = 40
    t_retval.append(THRESHOLD_SENSITIVITY)
    # Blob size limit before we consider it for tracking.
    CONTOUR_WIDTH = 21
    CONTOUR_HEIGHT = 16  # 21
    # The weighting to apply to "this" frame when averaging. A higher number
    # here means that the average scene will pick up changes more readily,
    # thus making the difference between average and current scenes smaller.
    DEFAULT_AVERAGE_WEIGHT = 0.01
    INITIAL_AVERAGE_WEIGHT = DEFAULT_AVERAGE_WEIGHT / 50
    # Blob smoothing kernel size, to join 'gaps' in cars
    SMOOTH = max(2, int(round((CONTOUR_WIDTH**0.5)/2, 0)))
    # Constants for drawing on the frame.
    LINE_THICKNESS = 1

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = loc+'/outputs/'+camera+'_output.mp4'
    out = cv2.VideoWriter(out, fourcc, 20, (frame_w, frame_h))

    outblob = loc+'/outputs/'+camera+'_outblob.mp4'
    diffop = loc+'/outputs/'+camera+'_outdiff.mp4'
    outblob = cv2.VideoWriter(outblob, fourcc, 20, (frame_w, frame_h))
    diffop = cv2.VideoWriter(diffop, fourcc, 20, (frame_w, frame_h))

    # A list of "tracked blobs".
    # NOTE(review): this list is never cleared between frames, so blobs from
    # earlier frames are re-submitted to the counter every frame — confirm
    # whether that accumulation is intentional before changing it.
    blobs = []
    car_counter = None  # will be created on the first processed frame
    frame_no = 0

    total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    total_cars = 0

    start_time = time.time()
    ret, frame = cap.read()

    while ret:
        ret, frame = cap.read()
        frame_no = frame_no + 1

        if ret and frame_no < total_frames:

            print("Processing frame ", frame_no)

            # get returned time
            frame_time = time.time()

            # convert BGR to HSV
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

            # only use the Value channel of the frame
            (_, _, grayFrame) = cv2.split(frame)
            grayFrame = cv2.bilateralFilter(grayFrame, 11, 21, 21)

            if avg is None:
                # Set up the average if this is the first time through.
                avg = grayFrame.copy().astype("float")
                continue

            # Build the average scene image by accumulating this frame
            # with the existing average. Lighter weight for the first frames
            # so a saved background is not washed out immediately.
            if frame_no < 10:
                def_wt = INITIAL_AVERAGE_WEIGHT
            else:
                def_wt = DEFAULT_AVERAGE_WEIGHT

            cv2.accumulateWeighted(grayFrame, avg, def_wt)

            # export averaged background for use in next video feed run
            if frame_no > 200:
                grayOp = cv2.cvtColor(cv2.convertScaleAbs(avg), cv2.COLOR_GRAY2BGR)
                backOut = loc+"/backgrounds/"+camera+"_bg.jpg"
                cv2.imwrite(backOut, grayOp)

            # Compute the grayscale difference between the current grayscale
            # frame and the average of the scene, then blur it.
            differenceFrame = cv2.absdiff(grayFrame, cv2.convertScaleAbs(avg))
            differenceFrame = cv2.GaussianBlur(differenceFrame, (5, 5), 0)
            diffout = cv2.cvtColor(differenceFrame, cv2.COLOR_GRAY2BGR)
            diffop.write(diffout)

            # get estimated otsu threshold level and keep its history
            retval, _ = cv2.threshold(differenceFrame, 0, 255,
                                      cv2.THRESH_BINARY+cv2.THRESH_OTSU)
            t_retval.append(retval)

            # apply threshold based on the (recent) average threshold value
            if frame_no < 10:
                ret2, thresholdImage = cv2.threshold(differenceFrame,
                                                     int(np.mean(t_retval)*0.9),
                                                     255, cv2.THRESH_BINARY)
            else:
                ret2, thresholdImage = cv2.threshold(differenceFrame,
                                                     int(np.mean(t_retval[-10:-1])*0.9),
                                                     255, cv2.THRESH_BINARY)

            # We'll need to fill in the gaps to make a complete vehicle as
            # windows and other features can split them!
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (SMOOTH, SMOOTH))
            # Fill any small holes
            thresholdImage = cv2.morphologyEx(thresholdImage, cv2.MORPH_CLOSE, kernel)
            # Remove noise
            thresholdImage = cv2.morphologyEx(thresholdImage, cv2.MORPH_OPEN, kernel)
            # Dilate to merge adjacent blobs
            thresholdImage = cv2.dilate(thresholdImage, kernel, iterations=2)
            # apply mask
            thresholdImage = cv2.bitwise_and(thresholdImage, thresholdImage, mask=mask)
            threshout = cv2.cvtColor(thresholdImage, cv2.COLOR_GRAY2BGR)
            outblob.write(threshout)

            # Find contours aka blobs in the threshold image.
            contours, hierarchy = cv2.findContours(thresholdImage,
                                                   cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_SIMPLE)

            print("Found ", len(contours), " vehicle contours.")
            # process contours if they exist!
            if contours:
                for (i, contour) in enumerate(contours):
                    # Find the bounding rectangle and center for each blob
                    (x, y, w, h) = cv2.boundingRect(contour)
                    contour_valid = (w > CONTOUR_WIDTH) and (h > CONTOUR_HEIGHT)

                    print("Contour #", i, ": pos=(x=", x, ", y=", y, ") size=(w=", w,
                          ", h=", h, ") valid=", contour_valid)

                    if not contour_valid:
                        continue

                    center = (int(x + w/2), int(y + h/2))
                    blobs.append(((x, y, w, h), center))

            for (i, match) in enumerate(blobs):
                contour, centroid = match
                x, y, w, h = contour

                # store the contour data
                c = dict(
                            frame_no = frame_no,
                            centre_x = x,
                            centre_y = y,
                            width = w,
                            height = h
                            )
                tracked_conts.append(c)

                cv2.rectangle(frame, (x, y), (x + w - 1, y + h - 1), (0, 0, 255), LINE_THICKNESS)
                cv2.circle(frame, centroid, 2, (0, 0, 255), -1)

            if car_counter is None:
                print("Creating vehicle counter...")
                # divider line at 2/3 of the frame height
                car_counter = VehicleCounter(frame.shape[:2], 2*frame.shape[0] / 3)

            # get latest count
            car_counter.update_count(blobs, frame)
            current_count = car_counter.vehicle_RHS + car_counter.vehicle_LHS

            # print elapsed time to console
            elapsed_time = time.time()-start_time
            print("-- %s seconds --" % round(elapsed_time, 2))

            # output video
            frame = cv2.cvtColor(frame, cv2.COLOR_HSV2BGR)

            # draw dividing line — flash green when a new car is counted
            if current_count > total_cars:
                cv2.line(frame, (0, int(2*frame_h/3)), (frame_w, int(2*frame_h/3)),
                         (0, 255, 0), 2*LINE_THICKNESS)
            else:
                cv2.line(frame, (0, int(2*frame_h/3)), (frame_w, int(2*frame_h/3)),
                         (0, 0, 255), LINE_THICKNESS)

            # update with latest count
            total_cars = current_count

            # draw upper limit
            cv2.line(frame, (0, 100), (frame_w, 100), (0, 0, 0), LINE_THICKNESS)

            ret, buffer = cv2.imencode('.jpg', frame)
            frame2 = buffer.tobytes()
            yield (b'--frame\r\n'
                b'Content-Type: image/jpeg\r\n\r\n' + frame2 + b'\r\n')  # concat frame one by one and show result

            out.write(frame)

            # Bug fix: was `cv2.waitKey(27) and 0xFF == ord('q')`, which is
            # always False (255 != ord('q')), so the quit key never worked.
            if cv2.waitKey(27) & 0xFF == ord('q'):
                break
        else:
            break

    # Release capture and writer resources (previously leaked: the release
    # calls were commented out).
    cap.release()
    out.release()
    outblob.release()
    diffop.release()
480
+
481
# Flask application setup (the web-streaming half of this script).
from flask import Flask, render_template, Response
import cv2  # NOTE(review): duplicate import — cv2 is already imported at the top of the file

app = Flask(__name__)
485
+
486
+
487
def find_camera(id):
    """Resolve a camera id to its video source.

    For a CCTV camera the source is an RTSP URL of the form
    rtsp://username:password@ip_address:554/...; for a local webcam use 0.
    """
    sources = [
        'rtsp://admin:@Unv123456@192.168.10.252:554/unicast/c1/s1/live',
    ]
    return sources[int(id)]
494
+#  for cctv camera use rtsp://username:password@ip_address:554/user=username_password='password'_channel=channel_number_stream=0.sdp' instead of camera
495
+#  for webcam use zero(0)
496
+
497
+
498
def gen_frames(camera_id):
    """Yield raw (unprocessed) MJPEG multipart chunks from the given camera."""
    source = find_camera(camera_id)
    capture = cv2.VideoCapture(source)

    while True:
        # Grab one frame; stop streaming as soon as the source dries up.
        ok, img = capture.read()
        if not ok:
            break
        # JPEG-encode and wrap in a multipart boundary for the browser.
        ok, encoded = cv2.imencode('.jpg', img)
        payload = encoded.tobytes()
        yield (b'--frame\r\n'
            b'Content-Type: image/jpeg\r\n\r\n' + payload + b'\r\n')  # concat frame one by one and show result
514
@app.route('/video_feed/<string:id>/', methods=["GET"])
def video_feed(id):
    """Video streaming route. Put this in the src attribute of an img tag."""
    # Alternative (raw, unprocessed stream):
    # return Response(gen_frames(id),
    #                 mimetype='multipart/x-mixed-replace; boundary=frame')
    # NOTE(review): process_video() ignores `id` and always streams the
    # configured inputFile — confirm whether per-camera routing was intended.
    return Response(process_video(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
524
+
525
@app.route('/', methods=["GET"])
def index():
    """Serve the landing page that embeds the video feed."""
    return render_template('index.html')
528
+
529
+
530
# Run the Flask development server (debug mode — not for production use).
if __name__ == '__main__':
    app.run(debug=True, port=9099)

BIN
cap/frame0.jpg


BIN
cap/frame1.jpg


BIN
cap/frame10.jpg


BIN
cap/frame100.jpg


BIN
cap/frame101.jpg


BIN
cap/frame102.jpg


BIN
cap/frame103.jpg


BIN
cap/frame104.jpg


BIN
cap/frame105.jpg


BIN
cap/frame106.jpg


BIN
cap/frame107.jpg


BIN
cap/frame108.jpg


BIN
cap/frame109.jpg


BIN
cap/frame11.jpg


BIN
cap/frame110.jpg


BIN
cap/frame111.jpg


BIN
cap/frame112.jpg


BIN
cap/frame113.jpg


BIN
cap/frame114.jpg


BIN
cap/frame115.jpg


BIN
cap/frame116.jpg


BIN
cap/frame117.jpg


BIN
cap/frame118.jpg


BIN
cap/frame119.jpg


BIN
cap/frame12.jpg


BIN
cap/frame120.jpg


BIN
cap/frame121.jpg


BIN
cap/frame122.jpg


BIN
cap/frame123.jpg


BIN
cap/frame124.jpg


BIN
cap/frame125.jpg


BIN
cap/frame126.jpg


BIN
cap/frame127.jpg


BIN
cap/frame128.jpg


BIN
cap/frame129.jpg


BIN
cap/frame13.jpg


BIN
cap/frame130.jpg


BIN
cap/frame131.jpg


BIN
cap/frame132.jpg


BIN
cap/frame133.jpg


BIN
cap/frame134.jpg


BIN
cap/frame135.jpg


BIN
cap/frame136.jpg


BIN
cap/frame137.jpg


BIN
cap/frame138.jpg


BIN
cap/frame139.jpg


BIN
cap/frame14.jpg


BIN
cap/frame140.jpg


BIN
cap/frame141.jpg


BIN
cap/frame142.jpg


BIN
cap/frame143.jpg


BIN
cap/frame144.jpg


BIN
cap/frame145.jpg


BIN
cap/frame146.jpg


BIN
cap/frame147.jpg


BIN
cap/frame148.jpg


BIN
cap/frame149.jpg


BIN
cap/frame15.jpg


BIN
cap/frame150.jpg


BIN
cap/frame151.jpg


BIN
cap/frame152.jpg


BIN
cap/frame153.jpg


BIN
cap/frame154.jpg


BIN
cap/frame155.jpg


BIN
cap/frame156.jpg


BIN
cap/frame157.jpg


BIN
cap/frame158.jpg


BIN
cap/frame159.jpg


BIN
cap/frame16.jpg


BIN
cap/frame160.jpg


BIN
cap/frame161.jpg


BIN
cap/frame162.jpg


BIN
cap/frame163.jpg


BIN
cap/frame164.jpg


BIN
cap/frame165.jpg


BIN
cap/frame166.jpg


BIN
cap/frame167.jpg


BIN
cap/frame168.jpg


BIN
cap/frame169.jpg


BIN
cap/frame17.jpg


BIN
cap/frame170.jpg


BIN
cap/frame171.jpg


BIN
cap/frame172.jpg


BIN
cap/frame173.jpg


BIN
cap/frame174.jpg


BIN
cap/frame175.jpg


BIN
cap/frame176.jpg


BIN
cap/frame177.jpg


BIN
cap/frame178.jpg


BIN
cap/frame179.jpg


BIN
cap/frame18.jpg


BIN
cap/frame180.jpg


BIN
cap/frame181.jpg


+ 0 - 0
cap/frame182.jpg


Some files were not shown because too many files changed in this diff