No description yet.

blobDetection.py 28KB

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 30 11:51:00 2017
@author: alexdrake
"""
import cv2
import numpy as np
import time
import logging
import math
import re
from os import walk
import os

# Vehicle counter adapted from Dan Mašek's answer at
# https://stackoverflow.com/questions/36254452/counting-cars-opencv-python-issue/36274515#36274515

# get working directory
loc = os.path.abspath('')

# Video source
inputFile = loc+'/inputs/625_201709280946.mp4'
#inputFile = 'rtsp://admin:@Unv123456@192.168.10.252:554/unicast/c1/s1/live'

# for testing
tracked_blobs = []
tracked_conts = []
t_retval = []
frame_no = 0
frame_w = 0

# ============================================================================
class Vehicle(object):
    def __init__(self, id, position):
        self.id = id
        self.positions = [position]
        self.frames_since_seen = 0
        self.frames_seen = 0
        self.counted = False
        self.vehicle_dir = 0

    @property
    def last_position(self):
        return self.positions[-1]

    @property
    def last_position2(self):
        return self.positions[-2]

    def add_position(self, new_position):
        self.positions.append(new_position)
        self.frames_since_seen = 0
        self.frames_seen += 1

    def draw(self, output_image):
        for point in self.positions:
            cv2.circle(output_image, point, 2, (0, 0, 255), -1)
        cv2.polylines(output_image, [np.int32(self.positions)],
                      False, (0, 0, 255), 1)

# ============================================================================
class VehicleCounter(object):
    def __init__(self, shape, divider):
        self.log = logging.getLogger("vehicle_counter")
        self.height, self.width = shape
        self.divider = divider
        self.vehicles = []
        self.next_vehicle_id = 0
        self.vehicle_count = 0
        self.vehicle_LHS = 0
        self.vehicle_RHS = 0
        self.max_unseen_frames = 10

    @staticmethod
    def get_vector(a, b):
        """Calculate vector (distance, angle in degrees) from point a to point b.

        Angle ranges from -180 to 180 degrees.
        A vector with angle 0 points straight down on the image;
        values decrease in the clockwise direction.
        """
        dx = float(b[0] - a[0])
        dy = float(b[1] - a[1])
        distance = math.sqrt(dx**2 + dy**2)
        if dy > 0:
            angle = math.degrees(math.atan(-dx/dy))
        elif dy == 0:
            if dx < 0:
                angle = 90.0
            elif dx > 0:
                angle = -90.0
            else:
                angle = 0.0
        else:
            if dx < 0:
                angle = 180 - math.degrees(math.atan(dx/dy))
            elif dx > 0:
                angle = -180 - math.degrees(math.atan(dx/dy))
            else:
                angle = 180.0
        return distance, angle, dx, dy
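
    # A few worked examples of the angle convention above (illustrative only):
    #   get_vector((100, 100), (100, 110)) -> (10.0,   0.0,  0.0,  10.0)  # straight down
    #   get_vector((100, 100), (110, 100)) -> (10.0, -90.0, 10.0,   0.0)  # straight right
    #   get_vector((100, 100), (100,  90)) -> (10.0, 180.0,  0.0, -10.0)  # straight up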

    @staticmethod
    def is_valid_vector(a, b):
        # A vector is intended to be valid when the travelled distance is
        # below a threshold and the angle deviation is less than 30 or
        # greater than 330 degrees; only the distance check is applied here
        # (the angle deviation is passed in as b but not enforced).
        distance, angle, _, _ = a
        threshold_distance = 12.0
        return (distance <= threshold_distance)
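
    # A minimal sketch (defined but never called) of what enforcing the angle
    # deviation described in the comment above might look like; the 30/330
    # degree bounds are taken from that comment, not from tested behaviour.
    @staticmethod
    def is_valid_vector_strict(vector, angle_dev):
        distance, angle, _, _ = vector
        # accept short moves whose direction barely changed between frames
        # (deviation > 330 covers wraparound, e.g. 179 deg vs -179 deg)
        return distance <= 12.0 and (angle_dev < 30 or angle_dev > 330)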

    def update_vehicle(self, vehicle, matches):
        # Find if any of the matches fits this vehicle
        for i, match in enumerate(matches):
            contour, centroid = match
            # store the vehicle data
            vector = self.get_vector(vehicle.last_position, centroid)
            # only measure angle deviation if we have enough points
            if vehicle.frames_seen > 2:
                prevVector = self.get_vector(vehicle.last_position2, vehicle.last_position)
                angleDev = abs(prevVector[1] - vector[1])
            else:
                angleDev = 0
            b = dict(
                id=vehicle.id,
                center_x=centroid[0],
                center_y=centroid[1],
                vector_x=vector[0],  # note: vector[0] is the distance
                vector_y=vector[1],  # note: vector[1] is the angle
                dx=vector[2],
                dy=vector[3],
                counted=vehicle.counted,
                frame_number=frame_no,
                angle_dev=angleDev
            )
            tracked_blobs.append(b)
            # check validity
            if self.is_valid_vector(vector, angleDev):
                vehicle.add_position(centroid)
                vehicle.frames_seen += 1  # note: add_position above also increments frames_seen
                # check vehicle direction
                if vector[3] > 0:
                    # positive dy means the vehicle is moving DOWN
                    vehicle.vehicle_dir = 1
                elif vector[3] < 0:
                    # negative dy means the vehicle is moving UP
                    vehicle.vehicle_dir = -1
                self.log.debug("Added match (%d, %d) to vehicle #%d. vector=(%0.2f,%0.2f)",
                               centroid[0], centroid[1], vehicle.id, vector[0], vector[1])
                return i
        # No matches fit...
        vehicle.frames_since_seen += 1
        self.log.debug("No match for vehicle #%d. frames_since_seen=%d",
                       vehicle.id, vehicle.frames_since_seen)
        return None

    def update_count(self, matches, output_image=None):
        self.log.debug("Updating count using %d matches...", len(matches))
        # First update all the existing vehicles
        for vehicle in self.vehicles:
            i = self.update_vehicle(vehicle, matches)
            if i is not None:
                del matches[i]
        # Add new vehicles based on the remaining matches
        for match in matches:
            contour, centroid = match
            new_vehicle = Vehicle(self.next_vehicle_id, centroid)
            self.next_vehicle_id += 1
            self.vehicles.append(new_vehicle)
            self.log.debug("Created new vehicle #%d from match (%d, %d).",
                           new_vehicle.id, centroid[0], centroid[1])
        # Count any uncounted vehicles that are past the divider
        for vehicle in self.vehicles:
            if not vehicle.counted and (((vehicle.last_position[1] > self.divider) and (vehicle.vehicle_dir == 1)) or
                    ((vehicle.last_position[1] < self.divider) and (vehicle.vehicle_dir == -1))) and (vehicle.frames_seen > 6):
                vehicle.counted = True
                # update appropriate counter
                if ((vehicle.last_position[1] > self.divider) and (vehicle.vehicle_dir == 1) and
                        (vehicle.last_position[0] >= (int(frame_w/2) - 10))):
                    self.vehicle_RHS += 1
                    self.vehicle_count += 1
                elif ((vehicle.last_position[1] < self.divider) and (vehicle.vehicle_dir == -1) and
                        (vehicle.last_position[0] <= (int(frame_w/2) + 10))):
                    self.vehicle_LHS += 1
                    self.vehicle_count += 1
                self.log.debug("Counted vehicle #%d (total count=%d).",
                               vehicle.id, self.vehicle_count)
        # Optionally draw the vehicles on an image
        if output_image is not None:
            for vehicle in self.vehicles:
                vehicle.draw(output_image)
            # LHS
            cv2.putText(output_image, ("LH Lane: %02d" % self.vehicle_LHS), (12, 56),
                        cv2.FONT_HERSHEY_PLAIN, 1.2, (127, 255, 255), 2)
            # RHS
            cv2.putText(output_image, ("RH Lane: %02d" % self.vehicle_RHS), (216, 56),
                        cv2.FONT_HERSHEY_PLAIN, 1.2, (127, 255, 255), 2)
        # Remove vehicles that have not been seen long enough
        removed = [v.id for v in self.vehicles
                   if v.frames_since_seen >= self.max_unseen_frames]
        self.vehicles[:] = [v for v in self.vehicles
                            if not v.frames_since_seen >= self.max_unseen_frames]
        for id in removed:
            self.log.debug("Removed vehicle #%d.", id)
        self.log.debug("Count updated, tracking %d vehicles.", len(self.vehicles))

# ============================================================================
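
# A minimal usage sketch of VehicleCounter with synthetic data (illustrative
# only, never called; the 320x240 frame size, divider and match values are
# assumptions, not taken from the video pipeline below):
def _counter_demo():
    counter = VehicleCounter((240, 320), divider=160)
    # each match is ((x, y, w, h), centroid); feed one detection per frame
    for y in (100, 110, 120, 130):
        counter.update_count([((150, y, 30, 20), (165, y + 10))])
    return counter.vehicle_count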

def process_video():
    global frame_no
    global frame_w
    camera = re.match(r".*/(\d+)_.*", inputFile)
    camera = camera.group(1)
    # import video file
    cap = cv2.VideoCapture(inputFile)
    # get list of background files
    f = []
    for (_, _, filenames) in walk(loc + "/backgrounds/"):
        f.extend(filenames)
        break
    # if a background exists for this camera import it, otherwise the
    # average is built on the fly
    if camera + "_bg.jpg" in f:
        bg = loc + "/backgrounds/" + camera + "_bg.jpg"
        default_bg = cv2.imread(bg)
        default_bg = cv2.cvtColor(default_bg, cv2.COLOR_BGR2HSV)
        (_, avgSat, default_bg) = cv2.split(default_bg)
        avg = default_bg.copy().astype("float")
    else:
        avg = None
    # get frame size
    frame_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # create a mask (manual for each camera); indices are [y, x] ranges
    mask = np.zeros((frame_h, frame_w), np.uint8)
    mask[:, :] = 255
    mask[:100, :] = 0
    mask[230:, 160:190] = 0
    mask[170:230, 170:190] = 0
    mask[140:170, 176:190] = 0
    mask[100:140, 176:182] = 0
    # The cutoff for threshold. A lower number means smaller changes between
    # the average and current scene are more readily detected.
    THRESHOLD_SENSITIVITY = 40
    t_retval.append(THRESHOLD_SENSITIVITY)
    # Minimum blob size before we consider it for tracking.
    CONTOUR_WIDTH = 21
    CONTOUR_HEIGHT = 16  # 21
    # The weighting to apply to "this" frame when averaging. A higher number
    # here means that the average scene will pick up changes more readily,
    # thus making the difference between average and current scenes smaller.
    DEFAULT_AVERAGE_WEIGHT = 0.01
    INITIAL_AVERAGE_WEIGHT = DEFAULT_AVERAGE_WEIGHT / 50
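    # With cv2.accumulateWeighted, avg = (1 - w)*avg + w*frame, so at the
    # default weight of 0.01 each new frame contributes 1% and a stationary
    # change fades into the background with a half-life of roughly
    # ln(2)/0.01 ~= 69 frames (illustrative arithmetic, not from the source).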
    # Blob smoothing kernel size, to join 'gaps' in cars
    SMOOTH = max(2, int(round((CONTOUR_WIDTH**0.5)/2, 0)))
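    # Worked through for the values above: CONTOUR_WIDTH = 21 gives
    # sqrt(21)/2 ~= 2.29, which rounds to 2, so SMOOTH = max(2, 2) = 2
    # and the morphology below uses a 2x2 elliptical kernel.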
    # Constants for drawing on the frame.
    LINE_THICKNESS = 1
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = loc + '/outputs/' + camera + '_output.mp4'
    #print(out)
    #exit()
    out = cv2.VideoWriter(out, fourcc, 20, (frame_w, frame_h))
    outblob = loc + '/outputs/' + camera + '_outblob.mp4'
    diffop = loc + '/outputs/' + camera + '_outdiff.mp4'
    outblob = cv2.VideoWriter(outblob, fourcc, 20, (frame_w, frame_h))
    diffop = cv2.VideoWriter(diffop, fourcc, 20, (frame_w, frame_h))
    # A list of "tracked blobs".
    blobs = []
    car_counter = None  # will be created later
    frame_no = 0
    total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    total_cars = 0
    start_time = time.time()
    ret, frame = cap.read()
    while ret:
        ret, frame = cap.read()
        frame_no = frame_no + 1
        if ret and frame_no < total_frames:
            print("Processing frame ", frame_no)
            # get returned time
            frame_time = time.time()
            # convert BGR to HSV
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            # only use the Value channel of the frame
            (_, _, grayFrame) = cv2.split(frame)
            grayFrame = cv2.bilateralFilter(grayFrame, 11, 21, 21)
            if avg is None:
                # Set up the average if this is the first time through.
                avg = grayFrame.copy().astype("float")
                continue
            # Build the average scene image by accumulating this frame
            # with the existing average.
            if frame_no < 10:
                def_wt = INITIAL_AVERAGE_WEIGHT
            else:
                def_wt = DEFAULT_AVERAGE_WEIGHT
            cv2.accumulateWeighted(grayFrame, avg, def_wt)
            # export the averaged background for use in the next video feed run
            #if frame_no > int(total_frames * 0.975):
            if frame_no > 200:
                grayOp = cv2.cvtColor(cv2.convertScaleAbs(avg), cv2.COLOR_GRAY2BGR)
                backOut = loc + "/backgrounds/" + camera + "_bg.jpg"
                cv2.imwrite(backOut, grayOp)
            # Compute the grayscale difference between the current frame and
            # the average of the scene.
            differenceFrame = cv2.absdiff(grayFrame, cv2.convertScaleAbs(avg))
            # blur the difference image
            differenceFrame = cv2.GaussianBlur(differenceFrame, (5, 5), 0)
            # cv2.imshow("difference", differenceFrame)
            diffout = cv2.cvtColor(differenceFrame, cv2.COLOR_GRAY2BGR)
            diffop.write(diffout)
            # get estimated Otsu threshold level
            retval, _ = cv2.threshold(differenceFrame, 0, 255,
                                      cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            # add to list of threshold levels
            t_retval.append(retval)
            # apply threshold based on the average threshold value
            if frame_no < 10:
                ret2, thresholdImage = cv2.threshold(differenceFrame,
                                                     int(np.mean(t_retval)*0.9),
                                                     255, cv2.THRESH_BINARY)
            else:
                ret2, thresholdImage = cv2.threshold(differenceFrame,
                                                     int(np.mean(t_retval[-10:-1])*0.9),
                                                     255, cv2.THRESH_BINARY)
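            # Illustrative arithmetic for the adaptive threshold above: if the
            # recent Otsu estimates average 50, the binary cutoff becomes
            # int(50 * 0.9) = 45; note that t_retval[-10:-1] averages the nine
            # values before the most recent one.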
            # We'll need to fill in the gaps to make a complete vehicle, as
            # windows and other features can split them!
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (SMOOTH, SMOOTH))
            # Fill any small holes
            thresholdImage = cv2.morphologyEx(thresholdImage, cv2.MORPH_CLOSE, kernel)
            # Remove noise
            thresholdImage = cv2.morphologyEx(thresholdImage, cv2.MORPH_OPEN, kernel)
            # Dilate to merge adjacent blobs
            thresholdImage = cv2.dilate(thresholdImage, kernel, iterations=2)
            # apply mask
            thresholdImage = cv2.bitwise_and(thresholdImage, thresholdImage, mask=mask)
            # cv2.imshow("threshold", thresholdImage)
            threshout = cv2.cvtColor(thresholdImage, cv2.COLOR_GRAY2BGR)
            outblob.write(threshout)
            # Find contours aka blobs in the threshold image.
            contours, hierarchy = cv2.findContours(thresholdImage,
                                                   cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_SIMPLE)
            print("Found ", len(contours), " vehicle contours.")
            # reset the per-frame detections, then process contours if they exist
            blobs = []
            if contours:
                for (i, contour) in enumerate(contours):
                    # Find the bounding rectangle and center for each blob
                    (x, y, w, h) = cv2.boundingRect(contour)
                    contour_valid = (w > CONTOUR_WIDTH) and (h > CONTOUR_HEIGHT)
                    print("Contour #", i, ": pos=(x=", x, ", y=", y, ") size=(w=", w,
                          ", h=", h, ") valid=", contour_valid)
                    if not contour_valid:
                        continue
                    center = (int(x + w/2), int(y + h/2))
                    blobs.append(((x, y, w, h), center))
            for (i, match) in enumerate(blobs):
                contour, centroid = match
                x, y, w, h = contour
                # store the contour data (x, y is the top-left corner of the box)
                c = dict(
                    frame_no=frame_no,
                    centre_x=x,
                    centre_y=y,
                    width=w,
                    height=h
                )
                tracked_conts.append(c)
                cv2.rectangle(frame, (x, y), (x + w - 1, y + h - 1), (0, 0, 255), LINE_THICKNESS)
                cv2.circle(frame, centroid, 2, (0, 0, 255), -1)
            if car_counter is None:
                print("Creating vehicle counter...")
                car_counter = VehicleCounter(frame.shape[:2], 2*frame.shape[0] / 3)
            # get latest count
            car_counter.update_count(blobs, frame)
            current_count = car_counter.vehicle_RHS + car_counter.vehicle_LHS
            # print elapsed time to console
            elapsed_time = time.time() - start_time
            print("-- %s seconds --" % round(elapsed_time, 2))
            # output video
            frame = cv2.cvtColor(frame, cv2.COLOR_HSV2BGR)
            # draw the dividing line; flash green when a new car is counted
            if current_count > total_cars:
                cv2.line(frame, (0, int(2*frame_h/3)), (frame_w, int(2*frame_h/3)),
                         (0, 255, 0), 2*LINE_THICKNESS)
            else:
                cv2.line(frame, (0, int(2*frame_h/3)), (frame_w, int(2*frame_h/3)),
                         (0, 0, 255), LINE_THICKNESS)
            # update with latest count
            total_cars = current_count
            # draw upper limit
            cv2.line(frame, (0, 100), (frame_w, 100), (0, 0, 0), LINE_THICKNESS)
            ret, buffer = cv2.imencode('.jpg', frame)
            frame2 = buffer.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame2 + b'\r\n')
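            # Each yielded chunk is one part of a multipart MJPEG stream: the
            # '--frame' boundary matches the 'boundary=frame' declared in the
            # Flask route's 'multipart/x-mixed-replace' mimetype below, so the
            # browser replaces the displayed image as each JPEG part arrives.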
            #cv2.imshow("preview", frame)
            #cv2.imwrite("../flask-hls-demo/static/frame.jpg", frame)
            out.write(frame)
            if cv2.waitKey(27) & 0xFF == ord('q'):
                break
        else:
            break
    #cv2.line()
    #cv2.destroyAllWindows()
    #cap.release()
    #out.release()

def process_video_cctv():
    global frame_no
    global frame_w
    inputFile = 'rtsp://admin:@Unv123456@192.168.10.252:554/unicast/c1/s1/live'
    #inputFile = '../flask-hls-demo/video/cctv.m3u8'
    #inputFile = 'http://localhost:5000/video/cctv.m3u8'
    #camera = re.match(r".*/(\d+)_.*", inputFile)
    camera = "uniview"
    # import video stream
    #print(cv2.getBuildInformation())
    cap = cv2.VideoCapture(inputFile)
    #print(cap)
    # get list of background files
    f = []
    for (_, _, filenames) in walk(loc + "/backgrounds/"):
        f.extend(filenames)
        break
    # if a background exists for this camera import it, otherwise the
    # average is built on the fly
    if camera + "_bg.jpg" in f:
        bg = loc + "/backgrounds/" + camera + "_bg.jpg"
        default_bg = cv2.imread(bg)
        default_bg = cv2.cvtColor(default_bg, cv2.COLOR_BGR2HSV)
        (_, avgSat, default_bg) = cv2.split(default_bg)
        avg = default_bg.copy().astype("float")
    else:
        avg = None
    # get frame size
    frame_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # create a mask (manual for each camera); indices are [y, x] ranges
    mask = np.zeros((frame_h, frame_w), np.uint8)
    mask[:, :] = 255
    mask[:100, :] = 0
    mask[230:, 160:190] = 0
    mask[170:230, 170:190] = 0
    mask[140:170, 176:190] = 0
    mask[100:140, 176:182] = 0
    # The cutoff for threshold. A lower number means smaller changes between
    # the average and current scene are more readily detected.
    THRESHOLD_SENSITIVITY = 40
    t_retval.append(THRESHOLD_SENSITIVITY)
    # Minimum blob size before we consider it for tracking.
    CONTOUR_WIDTH = 21
    CONTOUR_HEIGHT = 16  # 21
    # The weighting to apply to "this" frame when averaging. A higher number
    # here means that the average scene will pick up changes more readily,
    # thus making the difference between average and current scenes smaller.
    DEFAULT_AVERAGE_WEIGHT = 0.01
    INITIAL_AVERAGE_WEIGHT = DEFAULT_AVERAGE_WEIGHT / 50
    # Blob smoothing kernel size, to join 'gaps' in cars
    SMOOTH = max(2, int(round((CONTOUR_WIDTH**0.5)/2, 0)))
    # Constants for drawing on the frame.
    LINE_THICKNESS = 1
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    out = loc + '/outputs/' + camera + '_output.mp4'
    #print(out)
    #exit()
    out = cv2.VideoWriter(out, fourcc, 20, (frame_w, frame_h))
    outblob = loc + '/outputs/' + camera + '_outblob.mp4'
    diffop = loc + '/outputs/' + camera + '_outdiff.mp4'
    outblob = cv2.VideoWriter(outblob, fourcc, 20, (frame_w, frame_h))
    diffop = cv2.VideoWriter(diffop, fourcc, 20, (frame_w, frame_h))
    # A list of "tracked blobs".
    blobs = []
    car_counter = None  # will be created later
    frame_no = 0
    total_frames = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    # live streams report no usable frame count, so never stop on it
    if total_frames <= 0:
        total_frames = float('inf')
    total_cars = 0
    start_time = time.time()
    ret, frame = cap.read()
    while ret:
        ret, frame = cap.read()
        frame_no = frame_no + 1
        if ret and frame_no < total_frames:
            print("Processing frame ", frame_no)
            # get returned time
            frame_time = time.time()
            # convert BGR to HSV
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            # only use the Value channel of the frame
            (_, _, grayFrame) = cv2.split(frame)
            grayFrame = cv2.bilateralFilter(grayFrame, 11, 21, 21)
            if avg is None:
                # Set up the average if this is the first time through.
                avg = grayFrame.copy().astype("float")
                continue
            # Build the average scene image by accumulating this frame
            # with the existing average.
            if frame_no < 10:
                def_wt = INITIAL_AVERAGE_WEIGHT
            else:
                def_wt = DEFAULT_AVERAGE_WEIGHT
            cv2.accumulateWeighted(grayFrame, avg, def_wt)
            # export the averaged background for use in the next video feed run
            #if frame_no > int(total_frames * 0.975):
            if frame_no > 200:
                grayOp = cv2.cvtColor(cv2.convertScaleAbs(avg), cv2.COLOR_GRAY2BGR)
                backOut = loc + "/backgrounds/" + camera + "_bg.jpg"
                cv2.imwrite(backOut, grayOp)
            # Compute the grayscale difference between the current frame and
            # the average of the scene.
            differenceFrame = cv2.absdiff(grayFrame, cv2.convertScaleAbs(avg))
            # blur the difference image
            differenceFrame = cv2.GaussianBlur(differenceFrame, (5, 5), 0)
            # cv2.imshow("difference", differenceFrame)
            diffout = cv2.cvtColor(differenceFrame, cv2.COLOR_GRAY2BGR)
            diffop.write(diffout)
            # get estimated Otsu threshold level
            retval, _ = cv2.threshold(differenceFrame, 0, 255,
                                      cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            # add to list of threshold levels
            t_retval.append(retval)
            # apply threshold based on the average threshold value
            if frame_no < 10:
                ret2, thresholdImage = cv2.threshold(differenceFrame,
                                                     int(np.mean(t_retval)*0.9),
                                                     255, cv2.THRESH_BINARY)
            else:
                ret2, thresholdImage = cv2.threshold(differenceFrame,
                                                     int(np.mean(t_retval[-10:-1])*0.9),
                                                     255, cv2.THRESH_BINARY)
            # We'll need to fill in the gaps to make a complete vehicle, as
            # windows and other features can split them!
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (SMOOTH, SMOOTH))
            # Fill any small holes
            thresholdImage = cv2.morphologyEx(thresholdImage, cv2.MORPH_CLOSE, kernel)
            # Remove noise
            thresholdImage = cv2.morphologyEx(thresholdImage, cv2.MORPH_OPEN, kernel)
            # Dilate to merge adjacent blobs
            thresholdImage = cv2.dilate(thresholdImage, kernel, iterations=2)
            # apply mask
            thresholdImage = cv2.bitwise_and(thresholdImage, thresholdImage, mask=mask)
            # cv2.imshow("threshold", thresholdImage)
            threshout = cv2.cvtColor(thresholdImage, cv2.COLOR_GRAY2BGR)
            outblob.write(threshout)
            # Find contours aka blobs in the threshold image.
            contours, hierarchy = cv2.findContours(thresholdImage,
                                                   cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_SIMPLE)
            print("Found ", len(contours), " vehicle contours.")
            # reset the per-frame detections, then process contours if they exist
            blobs = []
            if contours:
                for (i, contour) in enumerate(contours):
                    # Find the bounding rectangle and center for each blob
                    (x, y, w, h) = cv2.boundingRect(contour)
                    contour_valid = (w > CONTOUR_WIDTH) and (h > CONTOUR_HEIGHT)
                    print("Contour #", i, ": pos=(x=", x, ", y=", y, ") size=(w=", w,
                          ", h=", h, ") valid=", contour_valid)
                    if not contour_valid:
                        continue
                    center = (int(x + w/2), int(y + h/2))
                    blobs.append(((x, y, w, h), center))
            for (i, match) in enumerate(blobs):
                contour, centroid = match
                x, y, w, h = contour
                # store the contour data (x, y is the top-left corner of the box)
                c = dict(
                    frame_no=frame_no,
                    centre_x=x,
                    centre_y=y,
                    width=w,
                    height=h
                )
                tracked_conts.append(c)
                cv2.rectangle(frame, (x, y), (x + w - 1, y + h - 1), (0, 0, 255), LINE_THICKNESS)
                cv2.circle(frame, centroid, 2, (0, 0, 255), -1)
            if car_counter is None:
                print("Creating vehicle counter...")
                car_counter = VehicleCounter(frame.shape[:2], 2*frame.shape[0] / 3)
            # get latest count
            car_counter.update_count(blobs, frame)
            current_count = car_counter.vehicle_RHS + car_counter.vehicle_LHS
            # print elapsed time to console
            elapsed_time = time.time() - start_time
            print("-- %s seconds --" % round(elapsed_time, 2))
            # output video
            frame = cv2.cvtColor(frame, cv2.COLOR_HSV2BGR)
            # draw the dividing line; flash green when a new car is counted
            if current_count > total_cars:
                cv2.line(frame, (0, int(2*frame_h/3)), (frame_w, int(2*frame_h/3)),
                         (0, 255, 0), 2*LINE_THICKNESS)
            else:
                cv2.line(frame, (0, int(2*frame_h/3)), (frame_w, int(2*frame_h/3)),
                         (0, 0, 255), LINE_THICKNESS)
            # update with latest count
            total_cars = current_count
            # draw upper limit
            cv2.line(frame, (0, 100), (frame_w, 100), (0, 0, 0), LINE_THICKNESS)
            ret, buffer = cv2.imencode('.jpg', frame)
            frame2 = buffer.tobytes()
            # emit the frame as one part of a multipart MJPEG stream
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame2 + b'\r\n')
            #cv2.imshow("preview", frame)
            #cv2.imwrite("../flask-hls-demo/static/frame.jpg", frame)
            out.write(frame)
            if cv2.waitKey(27) & 0xFF == ord('q'):
                break
        else:
            break

from flask import Flask, render_template, Response

app = Flask(__name__)

def find_camera(id):
    '''
    cameras = ['rtsp://username:password@ip_address:554/user=username_password='password'_channel=channel_number_stream=0.sdp',
               'rtsp://username:password@ip_address:554/user=username_password='password'_channel=channel_number_stream=0.sdp']
    '''
    cameras = ['rtsp://admin:@Unv123456@192.168.10.252:554/unicast/c1/s1/live']
    return cameras[int(id)]

# for a CCTV camera use 'rtsp://username:password@ip_address:554/...' instead of a camera index
# for a webcam use zero (0)
def gen_frames(camera_id):
    cam = find_camera(camera_id)
    cap = cv2.VideoCapture(cam)
    while True:
        # Capture frame-by-frame
        success, frame = cap.read()  # read the camera frame
        if not success:
            break
        else:
            ret, buffer = cv2.imencode('.jpg', frame)
            frame = buffer.tobytes()
            # emit the frame as one part of a multipart MJPEG stream
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video_feed/<string:id>/', methods=["GET"])
def video_feed(id):
    """Video streaming route. Put this in the src attribute of an img tag."""
    '''
    return Response(gen_frames(id),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
    '''
    return Response(process_video(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
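
# Example (assumed markup, depends on templates/index.html) of embedding the
# stream per the docstring above; the trailing id segment selects the camera:
#   <img src="/video_feed/0/" alt="live vehicle count stream">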

@app.route('/', methods=["GET"])
def index():
    return render_template('index.html')

if __name__ == '__main__':
    app.run(debug=True, port=9099)