
process cctv

tum 3 years ago
parent
current commit
b24d15d6d0
1 file changed, 243 insertions and 1 deletion
blobDetection.py  +243 -1

@@ -478,6 +478,248 @@ def process_video():
     #cap.release()
     #out.release()
 
+def process_video_cctv():
+    global frame_no
+    global frame_w
+    inputFile = 'rtsp://admin:@Unv123456@192.168.10.252:554/unicast/c1/s1/live'
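+    # NOTE: the password above contains a literal '@'; some RTSP client
+    # backends only accept it percent-encoded as %40 in the URL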
+    #camera = re.match(r".*/(\d+)_.*", inputFile)
+    camera = "uniview"
+
+    # open the video stream
+    cap = cv2.VideoCapture(inputFile)
+
+    # get list of background files
+    f = []
+    for (_, _, filenames) in walk(loc+"/backgrounds/"):
+        f.extend(filenames)
+        break
+
+    # if a background exists for this camera, import it; otherwise the
+    # average will be built on the fly
+    if camera+"_bg.jpg" in f:
+        bg = loc+"/backgrounds/"+camera+"_bg.jpg"
+        default_bg = cv2.imread(bg)
+        default_bg = cv2.cvtColor(default_bg, cv2.COLOR_BGR2HSV)
+        (_,avgSat,default_bg) = cv2.split(default_bg)
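+        # cv2.split on an HSV image returns (H, S, V); keep the V
+        # (brightness) channel as the grayscale background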
+        avg = default_bg.copy().astype("float")
+    else:
+        avg = None
+
+    # get frame size
+    frame_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+    frame_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+    # create a mask (manual for each camera)
+    mask = np.zeros((frame_h,frame_w), np.uint8)
+    mask[:,:] = 255
+    mask[:100, :] = 0
+    mask[230:, 160:190] = 0
+    mask[170:230,170:190] = 0
+    mask[140:170,176:190] = 0
+    mask[100:140,176:182] = 0
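+    # mask is 255 where motion is considered and 0 where it is ignored
+    # (applied with cv2.bitwise_and further below)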
+
+    # The cutoff for thresholding. A lower number means smaller changes
+    # between the average and the current scene are more readily detected.
+    THRESHOLD_SENSITIVITY = 40
+    t_retval.append(THRESHOLD_SENSITIVITY)
+    # Minimum blob size before we consider it for tracking.
+    CONTOUR_WIDTH = 21
+    CONTOUR_HEIGHT = 16  #21
+    # The weighting to apply to "this" frame when averaging. A higher number
+    # here means that the average scene will pick up changes more readily,
+    # thus making the difference between average and current scenes smaller.
+    DEFAULT_AVERAGE_WEIGHT = 0.01
+    INITIAL_AVERAGE_WEIGHT = DEFAULT_AVERAGE_WEIGHT / 50
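+    # cv2.accumulateWeighted computes avg = (1 - w)*avg + w*frame, so a
+    # weight of 0.01 folds roughly 1% of each new frame into the background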
+    # Blob smoothing function, to join 'gaps' in cars
+    SMOOTH = max(2,int(round((CONTOUR_WIDTH**0.5)/2,0)))
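+    # e.g. CONTOUR_WIDTH = 21 gives max(2, round(sqrt(21)/2)) = max(2, 2) = 2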
+    # Constants for drawing on the frame.
+    LINE_THICKNESS = 1
+
+    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
+    out = loc+'/outputs/'+camera+'_output.mp4'
+    #print(out)
+    #exit()
+    out = cv2.VideoWriter(out, fourcc, 20, (frame_w, frame_h))
+
+    outblob = loc+'/outputs/'+camera+'_outblob.mp4'
+    diffop = loc+'/outputs/'+camera+'_outdiff.mp4'
+    outblob = cv2.VideoWriter(outblob, fourcc, 20, (frame_w, frame_h))
+    diffop = cv2.VideoWriter(diffop, fourcc, 20, (frame_w, frame_h))
+
+    # A list of "tracked blobs".
+    blobs = []
+    car_counter = None  # will be created later
+    frame_no = 0
+
+    total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
+    if total_frames <= 0:
+        # live RTSP streams report no usable frame count; run until the feed drops
+        total_frames = float('inf')
+    total_cars = 0
+
+    start_time = time.time()
+    ret, frame = cap.read()
+
+    while ret:
+        ret, frame = cap.read()
+        frame_no = frame_no + 1
+
+        if ret and frame_no < total_frames:
+
+            print("Processing frame", frame_no)
+
+            # get returned time
+            frame_time = time.time()
+
+            # convert BGR to HSV
+            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
+
+            # only use the Value channel of the frame
+            (_,_,grayFrame) = cv2.split(frame)
+            grayFrame = cv2.bilateralFilter(grayFrame, 11, 21, 21)
+
+            if avg is None:
+                # Set up the average if this is the first time through.
+                avg = grayFrame.copy().astype("float")
+                continue
+
+            # Build the average scene image by accumulating this frame
+            # with the existing average.
+            if frame_no < 10:
+                def_wt = INITIAL_AVERAGE_WEIGHT
+            else:
+                def_wt = DEFAULT_AVERAGE_WEIGHT
+
+            cv2.accumulateWeighted(grayFrame, avg, def_wt)
+
+            # export averaged background for use in the next run
+            #if frame_no > int(total_frames * 0.975):
+            if frame_no > 200:
+                grayOp = cv2.cvtColor(cv2.convertScaleAbs(avg), cv2.COLOR_GRAY2BGR)
+                backOut = loc+"/backgrounds/"+camera+"_bg.jpg"
+                cv2.imwrite(backOut, grayOp)
+
+            # Compute the difference between the current grayscale frame
+            # and the average of the scene.
+            differenceFrame = cv2.absdiff(grayFrame, cv2.convertScaleAbs(avg))
+            # blur the difference image
+            differenceFrame = cv2.GaussianBlur(differenceFrame, (5, 5), 0)
+            #cv2.imshow("difference", differenceFrame)
+            diffout = cv2.cvtColor(differenceFrame, cv2.COLOR_GRAY2BGR)
+            diffop.write(diffout)
+
+            # get estimated Otsu threshold level
+            retval, _ = cv2.threshold(differenceFrame, 0, 255,
+                                      cv2.THRESH_BINARY+cv2.THRESH_OTSU)
+            # add to list of threshold levels
+            t_retval.append(retval)
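+            # Otsu picks the threshold that minimises intra-class variance;
+            # averaging recent values below smooths frame-to-frame flicker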
+
+            # apply threshold based on the average threshold value
+            if frame_no < 10:
+                ret2, thresholdImage = cv2.threshold(differenceFrame,
+                                                     int(np.mean(t_retval)*0.9),
+                                                     255, cv2.THRESH_BINARY)
+            else:
+                ret2, thresholdImage = cv2.threshold(differenceFrame,
+                                                     int(np.mean(t_retval[-10:-1])*0.9),
+                                                     255, cv2.THRESH_BINARY)
+
+            # We'll need to fill in the gaps to make a complete vehicle, as
+            # windows and other features can split them!
+            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (SMOOTH, SMOOTH))
+            # Fill any small holes
+            thresholdImage = cv2.morphologyEx(thresholdImage, cv2.MORPH_CLOSE, kernel)
+
+            # Remove noise
+            thresholdImage = cv2.morphologyEx(thresholdImage, cv2.MORPH_OPEN, kernel)
+
+            # Dilate to merge adjacent blobs
+            thresholdImage = cv2.dilate(thresholdImage, kernel, iterations = 2)
+
+            # apply mask
+            thresholdImage = cv2.bitwise_and(thresholdImage, thresholdImage, mask = mask)
+            #cv2.imshow("threshold", thresholdImage)
+            threshout = cv2.cvtColor(thresholdImage, cv2.COLOR_GRAY2BGR)
+            outblob.write(threshout)
+
+            # Find contours (blobs) in the threshold image.
+            contours, hierarchy = cv2.findContours(thresholdImage,
+                                                   cv2.RETR_EXTERNAL,
+                                                   cv2.CHAIN_APPROX_SIMPLE)
+
+            print("Found", len(contours), "vehicle contours.")
+            # process contours if they exist!
+            if contours:
+                for (i, contour) in enumerate(contours):
+                    # Find the bounding rectangle and centre of each blob
+                    (x, y, w, h) = cv2.boundingRect(contour)
+                    contour_valid = (w > CONTOUR_WIDTH) and (h > CONTOUR_HEIGHT)
+
+                    print("Contour #", i, ": pos=(x=", x, ", y=", y, ") size=(w=", w,
+                          ", h=", h, ") valid=", contour_valid)
+
+                    if not contour_valid:
+                        continue
+
+                    center = (int(x + w/2), int(y + h/2))
+                    blobs.append(((x, y, w, h), center))
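+            # NOTE: blobs is never cleared between frames, so every detection
+            # made so far is redrawn and passed to the counter on each frame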
+
+            for (i, match) in enumerate(blobs):
+                contour, centroid = match
+                x, y, w, h = contour
+
+                # store the contour data (NB: despite the key names, x and y
+                # are the top-left corner of the bounding box, not its centre)
+                c = dict(
+                            frame_no = frame_no,
+                            centre_x = x,
+                            centre_y = y,
+                            width = w,
+                            height = h
+                            )
+                tracked_conts.append(c)
+
+                cv2.rectangle(frame, (x, y), (x + w - 1, y + h - 1), (0, 0, 255), LINE_THICKNESS)
+                cv2.circle(frame, centroid, 2, (0, 0, 255), -1)
+
+            if car_counter is None:
+                print("Creating vehicle counter...")
+                car_counter = VehicleCounter(frame.shape[:2], 2*frame.shape[0] / 3)
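+                # the divider passed to VehicleCounter (2/3 down the frame)
+                # matches the counting line drawn further below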
+
+            # get latest count
+            car_counter.update_count(blobs, frame)
+            current_count = car_counter.vehicle_RHS + car_counter.vehicle_LHS
+
+            # print elapsed time to console
+            elapsed_time = time.time()-start_time
+            print("-- %s seconds --" % round(elapsed_time,2))
+
+            # output video
+            frame = cv2.cvtColor(frame, cv2.COLOR_HSV2BGR)
+
+            # draw dividing line; flash green when a new car is counted
+            if current_count > total_cars:
+                cv2.line(frame, (0, int(2*frame_h/3)), (frame_w, int(2*frame_h/3)),
+                         (0,255,0), 2*LINE_THICKNESS)
+            else:
+                cv2.line(frame, (0, int(2*frame_h/3)), (frame_w, int(2*frame_h/3)),
+                         (0,0,255), LINE_THICKNESS)
+
+            # update with latest count
+            total_cars = current_count
+
+            # draw upper limit
+            cv2.line(frame, (0, 100), (frame_w, 100), (0,0,0), LINE_THICKNESS)
+
+            ret, buffer = cv2.imencode('.jpg', frame)
+            frame2 = buffer.tobytes()
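+            # each yielded JPEG replaces the previous image in the browser
+            # (MJPEG over multipart/x-mixed-replace)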
+            yield (b'--frame\r\n'
+                   b'Content-Type: image/jpeg\r\n\r\n' + frame2 + b'\r\n')
+
+            #cv2.imshow("preview", frame)
+            #cv2.imwrite("../flask-hls-demo/static/frame.jpg", frame)
+            out.write(frame)
+
+            if cv2.waitKey(27) & 0xFF == ord('q'):
+                break
+        else:
+            break
 from flask import Flask, render_template, Response
 import cv2
 
@@ -519,7 +761,7 @@ def video_feed(id):
     return Response(gen_frames(id),
                     mimetype='multipart/x-mixed-replace; boundary=frame')
     '''
-    return Response(process_video(),
+    return Response(process_video_cctv(),
                     mimetype='multipart/x-mixed-replace; boundary=frame')
 
 @app.route('/', methods=["GET"])