191
+                , new_vehicle.id, centroid[0], centroid[1])
192
+
193
+        # Count any uncounted vehicles that are past the divider
194
+        for vehicle in self.vehicles:
195
+            if not vehicle.counted and (((vehicle.last_position[1] > self.divider) and (vehicle.vehicle_dir == 1)) or
196
+                                          ((vehicle.last_position[1] < self.divider) and (vehicle.vehicle_dir == -1))) and (vehicle.frames_seen > 6):
197
+
198
+                vehicle.counted = True
199
+                # update appropriate counter
200
+                if ((vehicle.last_position[1] > self.divider) and (vehicle.vehicle_dir == 1) and (vehicle.last_position[0] >= (int(frame_w/2)-10))):
201
+                    self.vehicle_RHS += 1
202
+                    self.vehicle_count += 1
203
+                elif ((vehicle.last_position[1] < self.divider) and (vehicle.vehicle_dir == -1) and (vehicle.last_position[0] <= (int(frame_w/2)+10))):
204
+                    self.vehicle_LHS += 1
205
+                    self.vehicle_count += 1
206
+
207
+                self.log.debug("Counted vehicle #%d (total count=%d)."
208
+                    , vehicle.id, self.vehicle_count)
209
+
210
+        # Optionally draw the vehicles on an image
211
+        if output_image is not None:
212
+            for vehicle in self.vehicles:
213
+                vehicle.draw(output_image)
214
+
215
+            # LHS
216
+            cv2.putText(output_image, ("LH Lane: %02d" % self.vehicle_LHS), (12, 56)
217
+                , cv2.FONT_HERSHEY_PLAIN, 1.2, (127,255, 255), 2)
218
+            # RHS
219
+            cv2.putText(output_image, ("RH Lane: %02d" % self.vehicle_RHS), (216, 56)
220
+                , cv2.FONT_HERSHEY_PLAIN, 1.2, (127, 255, 255), 2)
221
+
222
+        # Remove vehicles that have not been seen long enough
223
+        removed = [ v.id for v in self.vehicles
224
+            if v.frames_since_seen >= self.max_unseen_frames ]
225
+        self.vehicles[:] = [ v for v in self.vehicles
226
+            if not v.frames_since_seen >= self.max_unseen_frames ]
227
+        for id in removed:
228
+            self.log.debug("Removed vehicle #%d.", id)
229
+
230
+        self.log.debug("Count updated, tracking %d vehicles.", len(self.vehicles))
231
+
232
+# ============================================================================
233
+
234
def process_video():
    """Process the input video, count vehicles, and stream annotated frames.

    Reads the module-global ``inputFile``, performs running-average background
    subtraction, thresholding and blob detection, feeds the blobs to a
    ``VehicleCounter``, writes annotated and diagnostic videos under
    ``loc + '/outputs/'``, and yields each annotated frame as a JPEG chunk
    suitable for a Flask ``multipart/x-mixed-replace`` streaming response.

    Yields:
        bytes: one MJPEG part per processed frame.
    """
    global frame_no
    global frame_w

    # The camera id is encoded in the input filename as ".../<id>_...".
    camera = re.match(r".*/(\d+)_.*", inputFile).group(1)

    cap = cv2.VideoCapture(inputFile)

    # Gather the background image filenames available on disk (top level only).
    background_files = []
    for (_, _, filenames) in walk(loc + "/backgrounds/"):
        background_files.extend(filenames)
        break

    # If a stored background exists for this camera, use it to seed the
    # running average; otherwise the average is built on the fly.
    if camera + "_bg.jpg" in background_files:
        default_bg = cv2.imread(loc + "/backgrounds/" + camera + "_bg.jpg")
        default_bg = cv2.cvtColor(default_bg, cv2.COLOR_BGR2HSV)
        (_, _, bg_value) = cv2.split(default_bg)  # keep only the Value channel
        avg = bg_value.copy().astype("float")
    else:
        avg = None

    # Frame size.
    frame_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    # Manually tuned mask for this camera: zeroed pixels are ignored.
    mask = np.zeros((frame_h, frame_w), np.uint8)
    mask[:, :] = 255
    mask[:100, :] = 0
    mask[230:, 160:190] = 0
    mask[170:230, 170:190] = 0
    mask[140:170, 176:190] = 0
    mask[100:140, 176:182] = 0

    # The cutoff for threshold. A lower number means smaller changes between
    # the average and current scene are more readily detected.
    THRESHOLD_SENSITIVITY = 40
    t_retval.append(THRESHOLD_SENSITIVITY)
    # Blob size limits before we consider it for tracking.
    CONTOUR_WIDTH = 21
    CONTOUR_HEIGHT = 16  # was 21
    # The weighting to apply to "this" frame when averaging. A higher number
    # here means that the average scene will pick up changes more readily,
    # thus making the difference between average and current scenes smaller.
    DEFAULT_AVERAGE_WEIGHT = 0.01
    INITIAL_AVERAGE_WEIGHT = DEFAULT_AVERAGE_WEIGHT / 50
    # Blob smoothing kernel size, to join 'gaps' in cars.
    SMOOTH = max(2, int(round((CONTOUR_WIDTH ** 0.5) / 2, 0)))
    # Constants for drawing on the frame.
    LINE_THICKNESS = 1

    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = cv2.VideoWriter(loc + '/outputs/' + camera + '_output.mp4',
                          fourcc, 20, (frame_w, frame_h))
    outblob = cv2.VideoWriter(loc + '/outputs/' + camera + '_outblob.mp4',
                              fourcc, 20, (frame_w, frame_h))
    diffop = cv2.VideoWriter(loc + '/outputs/' + camera + '_outdiff.mp4',
                             fourcc, 20, (frame_w, frame_h))

    # A list of "tracked blobs".
    # NOTE(review): this list is never cleared between frames, so blobs
    # accumulate for the whole run and are re-submitted to the counter on
    # every frame — confirm this is intentional before changing it.
    blobs = []
    car_counter = None  # created lazily on the first processed frame
    frame_no = 0

    total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    total_cars = 0

    start_time = time.time()
    ret, frame = cap.read()

    try:
        while ret:
            ret, frame = cap.read()
            frame_no = frame_no + 1

            if not (ret and frame_no < total_frames):
                break

            print("Processing frame ", frame_no)

            # Work in HSV and use only the Value channel of the frame.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            (_, _, grayFrame) = cv2.split(frame)
            grayFrame = cv2.bilateralFilter(grayFrame, 11, 21, 21)

            if avg is None:
                # Set up the average if this is the first time through.
                avg = grayFrame.copy().astype("float")
                continue

            # Build the average scene image by accumulating this frame with
            # the existing average; warm up faster during the first frames.
            if frame_no < 10:
                def_wt = INITIAL_AVERAGE_WEIGHT
            else:
                def_wt = DEFAULT_AVERAGE_WEIGHT
            cv2.accumulateWeighted(grayFrame, avg, def_wt)

            # Export the averaged background for use in the next run.
            if frame_no > 200:
                grayOp = cv2.cvtColor(cv2.convertScaleAbs(avg), cv2.COLOR_GRAY2BGR)
                cv2.imwrite(loc + "/backgrounds/" + camera + "_bg.jpg", grayOp)

            # Difference between the current frame and the average scene.
            differenceFrame = cv2.absdiff(grayFrame, cv2.convertScaleAbs(avg))
            differenceFrame = cv2.GaussianBlur(differenceFrame, (5, 5), 0)
            diffop.write(cv2.cvtColor(differenceFrame, cv2.COLOR_GRAY2BGR))

            # Estimate the Otsu threshold level and keep a history of levels.
            retval, _ = cv2.threshold(differenceFrame, 0, 255,
                                      cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            t_retval.append(retval)

            # Apply a threshold based on the (recent) average threshold value.
            if frame_no < 10:
                level = int(np.mean(t_retval) * 0.9)
            else:
                level = int(np.mean(t_retval[-10:-1]) * 0.9)
            _, thresholdImage = cv2.threshold(differenceFrame, level, 255,
                                              cv2.THRESH_BINARY)

            # Fill gaps so split blobs (windows etc.) merge into one vehicle.
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (SMOOTH, SMOOTH))
            thresholdImage = cv2.morphologyEx(thresholdImage, cv2.MORPH_CLOSE, kernel)
            # Remove noise.
            thresholdImage = cv2.morphologyEx(thresholdImage, cv2.MORPH_OPEN, kernel)
            # Dilate to merge adjacent blobs.
            thresholdImage = cv2.dilate(thresholdImage, kernel, iterations=2)
            # Apply the camera mask.
            thresholdImage = cv2.bitwise_and(thresholdImage, thresholdImage, mask=mask)
            outblob.write(cv2.cvtColor(thresholdImage, cv2.COLOR_GRAY2BGR))

            # Find contours (blobs) in the threshold image.
            contours, hierarchy = cv2.findContours(thresholdImage,
                                                   cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_SIMPLE)
            print("Found ", len(contours), " vehicle contours.")

            for (i, contour) in enumerate(contours):
                (x, y, w, h) = cv2.boundingRect(contour)
                contour_valid = (w > CONTOUR_WIDTH) and (h > CONTOUR_HEIGHT)
                print("Contour #", i, ": pos=(x=", x, ", y=", y, ") size=(w=", w,
                      ", h=", h, ") valid=", contour_valid)
                if not contour_valid:
                    continue
                center = (int(x + w / 2), int(y + h / 2))
                blobs.append(((x, y, w, h), center))

            # Record and draw every tracked blob.
            for (i, match) in enumerate(blobs):
                contour, centroid = match
                x, y, w, h = contour
                tracked_conts.append(dict(
                    frame_no=frame_no,
                    centre_x=x,
                    centre_y=y,
                    width=w,
                    height=h,
                ))
                cv2.rectangle(frame, (x, y), (x + w - 1, y + h - 1),
                              (0, 0, 255), LINE_THICKNESS)
                cv2.circle(frame, centroid, 2, (0, 0, 255), -1)

            if car_counter is None:
                print("Creating vehicle counter...")
                # Divider is placed two thirds of the way down the frame.
                car_counter = VehicleCounter(frame.shape[:2], 2 * frame.shape[0] / 3)

            # Get the latest count.
            car_counter.update_count(blobs, frame)
            current_count = car_counter.vehicle_RHS + car_counter.vehicle_LHS

            elapsed_time = time.time() - start_time
            print("-- %s seconds --" % round(elapsed_time, 2))

            frame = cv2.cvtColor(frame, cv2.COLOR_HSV2BGR)

            # Draw the dividing line; flash green when a new car is counted.
            divider_y = int(2 * frame_h / 3)
            if current_count > total_cars:
                cv2.line(frame, (0, divider_y), (frame_w, divider_y),
                         (0, 255, 0), 2 * LINE_THICKNESS)
            else:
                cv2.line(frame, (0, divider_y), (frame_w, divider_y),
                         (0, 0, 255), LINE_THICKNESS)
            total_cars = current_count

            # Draw the upper limit line.
            cv2.line(frame, (0, 100), (frame_w, 100), (0, 0, 0), LINE_THICKNESS)

            # Fix: use a separate variable for the encode result so the
            # original's clobbering of the 'ret' loop flag cannot silently
            # terminate processing on a failed encode.
            encoded, buffer = cv2.imencode('.jpg', frame)
            frame2 = buffer.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame2 + b'\r\n')

            out.write(frame)

            # Fix: the original tested 'waitKey(27) and 0xFF == ord("q")',
            # which is always false; bitwise & is the intended idiom.
            if cv2.waitKey(27) & 0xFF == ord('q'):
                break
    finally:
        # Fix: release capture and writer resources (the original left these
        # commented out, leaking file handles).
        cap.release()
        out.release()
        outblob.release()
        diffop.release()
480
+
481
+from flask import Flask, render_template, Response
482
+import cv2
483
+
484
+app = Flask(__name__)
485
+
486
+
487
def find_camera(id):
    """Return the stream source for camera *id* (an int or numeric string).

    For a CCTV camera the source is an RTSP URL of the form
    ``rtsp://user:pass@ip:554/...``; for a local webcam it would be ``0``.
    """
    # SECURITY NOTE(review): credentials are hard-coded in source — consider
    # moving them to configuration or environment variables.
    sources = ['rtsp://admin:@Unv123456@192.168.10.252:554/unicast/c1/s1/live']
    return sources[int(id)]
494
+#  for cctv camera use rtsp://username:password@ip_address:554/user=username_password='password'_channel=channel_number_stream=0.sdp' instead of camera
495
+#  for webcam use zero(0)
496
+
497
+
498
def gen_frames(camera_id):
    """Yield raw camera frames as an MJPEG multipart stream.

    Opens the source returned by ``find_camera(camera_id)`` and yields one
    JPEG-encoded part per captured frame until the source stops producing
    frames.

    Yields:
        bytes: one ``multipart/x-mixed-replace`` part per frame.
    """
    cam = find_camera(camera_id)
    cap = cv2.VideoCapture(cam)
    try:
        while True:
            success, frame = cap.read()  # read the next camera frame
            if not success:
                break
            ok, buffer = cv2.imencode('.jpg', frame)
            if not ok:
                # Robustness fix: skip frames that fail to encode instead of
                # calling .tobytes() on None and crashing the stream.
                continue
            frame = buffer.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')  # concat frame one by one and show result
    finally:
        # Fix: the original never released the capture device (resource leak).
        cap.release()
514
@app.route('/video_feed/<string:id>/', methods=["GET"])
def video_feed(id):
    """Video streaming route. Put this in the src attribute of an img tag.

    Streams the processed, annotated video. To stream the raw camera feed
    instead, return ``Response(gen_frames(id), ...)`` with the same mimetype.
    """
    return Response(process_video(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
524
+
525
@app.route('/', methods=["GET"])
def index():
    """Serve the landing page that embeds the video stream."""
    return render_template('index.html')
528
+
529
+
530
if __name__ == '__main__':
    # Development entry point; debug=True must not be used in production.
    app.run(debug=True, port=9099)

BIN
cap/frame0.jpg


BIN
cap/frame1.jpg


BIN
cap/frame10.jpg


BIN
cap/frame100.jpg


BIN
cap/frame101.jpg


BIN
cap/frame102.jpg


BIN
cap/frame103.jpg


BIN
cap/frame104.jpg


BIN
cap/frame105.jpg


BIN
cap/frame106.jpg


BIN
cap/frame107.jpg


BIN
cap/frame108.jpg


BIN
cap/frame109.jpg


BIN
cap/frame11.jpg


BIN
cap/frame110.jpg


BIN
cap/frame111.jpg


BIN
cap/frame112.jpg


BIN
cap/frame113.jpg


BIN
cap/frame114.jpg


BIN
cap/frame115.jpg


BIN
cap/frame116.jpg


BIN
cap/frame117.jpg


BIN
cap/frame118.jpg


BIN
cap/frame119.jpg


BIN
cap/frame12.jpg


BIN
cap/frame120.jpg


BIN
cap/frame121.jpg


BIN
cap/frame122.jpg


BIN
cap/frame123.jpg


BIN
cap/frame124.jpg


BIN
cap/frame125.jpg


BIN
cap/frame126.jpg


BIN
cap/frame127.jpg


BIN
cap/frame128.jpg


BIN
cap/frame129.jpg


BIN
cap/frame13.jpg


BIN
cap/frame130.jpg


BIN
cap/frame131.jpg


BIN
cap/frame132.jpg


BIN
cap/frame133.jpg


BIN
cap/frame134.jpg


BIN
cap/frame135.jpg


BIN
cap/frame136.jpg


BIN
cap/frame137.jpg


BIN
cap/frame138.jpg


BIN
cap/frame139.jpg


BIN
cap/frame14.jpg


BIN
cap/frame140.jpg


BIN
cap/frame141.jpg


BIN
cap/frame142.jpg


BIN
cap/frame143.jpg


BIN
cap/frame144.jpg


BIN
cap/frame145.jpg


BIN
cap/frame146.jpg


BIN
cap/frame147.jpg


BIN
cap/frame148.jpg


BIN
cap/frame149.jpg


BIN
cap/frame15.jpg


BIN
cap/frame150.jpg


BIN
cap/frame151.jpg


BIN
cap/frame152.jpg


BIN
cap/frame153.jpg


BIN
cap/frame154.jpg


BIN
cap/frame155.jpg


BIN
cap/frame156.jpg


BIN
cap/frame157.jpg


BIN
cap/frame158.jpg


BIN
cap/frame159.jpg


BIN
cap/frame16.jpg


BIN
cap/frame160.jpg


BIN
cap/frame161.jpg


BIN
cap/frame162.jpg


BIN
cap/frame163.jpg


BIN
cap/frame164.jpg


BIN
cap/frame165.jpg


BIN
cap/frame166.jpg


BIN
cap/frame167.jpg


BIN
cap/frame168.jpg


BIN
cap/frame169.jpg


BIN
cap/frame17.jpg


BIN
cap/frame170.jpg


BIN
cap/frame171.jpg


BIN
cap/frame172.jpg


BIN
cap/frame173.jpg


BIN
cap/frame174.jpg


BIN
cap/frame175.jpg


BIN
cap/frame176.jpg


BIN
cap/frame177.jpg


BIN
cap/frame178.jpg


BIN
cap/frame179.jpg


BIN
cap/frame18.jpg


BIN
cap/frame180.jpg


BIN
cap/frame181.jpg


+ 0 - 0
cap/frame182.jpg


Some files were not shown because too many files changed in this diff

tum/whitesports - Gogs: Simplico Git Service

Keine Beschreibung

class-IXR-value.php 3.7KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139
  1. <?php
  2. /**
  3. * IXR_Value
  4. *
  5. * @package IXR
  6. * @since 1.5.0
  7. */
  8. class IXR_Value {
  9. var $data;
  10. var $type;
  11. /**
  12. * PHP5 constructor.
  13. */
  14. function __construct( $data, $type = false )
  15. {
  16. $this->data = $data;
  17. if (!$type) {
  18. $type = $this->calculateType();
  19. }
  20. $this->type = $type;
  21. if ($type == 'struct') {
  22. // Turn all the values in the array in to new IXR_Value objects
  23. foreach ($this->data as $key => $value) {
  24. $this->data[$key] = new IXR_Value($value);
  25. }
  26. }
  27. if ($type == 'array') {
  28. for ($i = 0, $j = count($this->data); $i < $j; $i++) {
  29. $this->data[$i] = new IXR_Value($this->data[$i]);
  30. }
  31. }
  32. }
  33. /**
  34. * PHP4 constructor.
  35. */
  36. public function IXR_Value( $data, $type = false ) {
  37. self::__construct( $data, $type );
  38. }
  39. function calculateType()
  40. {
  41. if ($this->data === true || $this->data === false) {
  42. return 'boolean';
  43. }
  44. if (is_integer($this->data)) {
  45. return 'int';
  46. }
  47. if (is_double($this->data)) {
  48. return 'double';
  49. }
  50. // Deal with IXR object types base64 and date
  51. if (is_object($this->data) && is_a($this->data, 'IXR_Date')) {
  52. return 'date';
  53. }
  54. if (is_object($this->data) && is_a($this->data, 'IXR_Base64')) {
  55. return 'base64';
  56. }
  57. // If it is a normal PHP object convert it in to a struct
  58. if (is_object($this->data)) {
  59. $this->data = get_object_vars($this->data);
  60. return 'struct';
  61. }
  62. if (!is_array($this->data)) {
  63. return 'string';
  64. }
  65. // We have an array - is it an array or a struct?
  66. if ($this->isStruct($this->data)) {
  67. return 'struct';
  68. } else {
  69. return 'array';
  70. }
  71. }
  72. function getXml()
  73. {
  74. // Return XML for this value
  75. switch ($this->type) {
  76. case 'boolean':
  77. return '<boolean>'.(($this->data) ? '1' : '0').'</boolean>';
  78. break;
  79. case 'int':
  80. return '<int>'.$this->data.'</int>';
  81. break;
  82. case 'double':
  83. return '<double>'.$this->data.'</double>';
  84. break;
  85. case 'string':
  86. return '<string>'.htmlspecialchars($this->data).'</string>';
  87. break;
  88. case 'array':
  89. $return = '<array><data>'."\n";
  90. foreach ($this->data as $item) {
  91. $return .= ' <value>'.$item->getXml()."</value>\n";
  92. }
  93. $return .= '</data></array>';
  94. return $return;
  95. break;
  96. case 'struct':
  97. $return = '<struct>'."\n";
  98. foreach ($this->data as $name => $value) {
  99. $name = htmlspecialchars($name);
  100. $return .= " <member><name>$name</name><value>";
  101. $return .= $value->getXml()."</value></member>\n";
  102. }
  103. $return .= '</struct>';
  104. return $return;
  105. break;
  106. case 'date':
  107. case 'base64':
  108. return $this->data->getXml();
  109. break;
  110. }
  111. return false;
  112. }
  113. /**
  114. * Checks whether or not the supplied array is a struct or not
  115. *
  116. * @param array $array
  117. * @return bool
  118. */
  119. function isStruct($array)
  120. {
  121. $expected = 0;
  122. foreach ($array as $key => $value) {
  123. if ((string)$key !== (string)$expected) {
  124. return true;
  125. }
  126. $expected++;
  127. }
  128. return false;
  129. }
  130. }