#!/usr/bin/env python3
"""
Vehicle Detector with Timestamp Logging

Detects moving vehicles with a Raspberry Pi camera (Picamera2) using
MOG2 background subtraction, logs a timestamped record for every
counted vehicle to per-day text and JSON files, and can generate a
daily summary report.
"""

import cv2
import numpy as np
from picamera2 import Picamera2
import time
import os
from datetime import datetime, timedelta
import json


class VehicleDetectorLogger:
    """Camera-based vehicle counter with per-day file logging."""

    def __init__(self):
        # Create log directory
        self.log_dir = "vehicle_logs"
        os.makedirs(self.log_dir, exist_ok=True)

        # Current log file (rolled over daily inside log_vehicle())
        self.current_log_date = datetime.now().date()
        self.log_file = self.get_log_file_path()

        # Initialize statistics
        self.vehicle_count = 0
        self.vehicle_records = []  # all vehicle records for the current day
        self.daily_stats = self.load_daily_stats()

        # Detection parameters
        self.last_count_time = time.time()  # debounce timer for counting
        self.frame_count = 0
        self.start_time = time.time()

        # Initialize camera
        print("Initializing camera...")
        self.picam2 = Picamera2()
        config = self.picam2.create_video_configuration(
            main={"size": (640, 480), "format": "RGB888"}
        )
        self.picam2.configure(config)

        # Background subtractor; short history (50 frames) adapts quickly
        # to lighting changes, varThreshold tunes motion sensitivity.
        self.fgbg = cv2.createBackgroundSubtractorMOG2(history=50, varThreshold=25)

        print("System ready!")
        print(f"Log file: {self.log_file}")
        print("-" * 50)

    def get_log_file_path(self):
        """Return the text log file path for the current date."""
        date_str = datetime.now().strftime("%Y%m%d")
        return os.path.join(self.log_dir, f"vehicles_{date_str}.txt")

    def load_daily_stats(self):
        """Load daily statistics, keeping only the last 7 days of entries.

        Returns an empty dict when the stats file is missing, unreadable,
        or contains invalid JSON.
        """
        stats_file = os.path.join(self.log_dir, "daily_stats.json")
        if os.path.exists(stats_file):
            try:
                with open(stats_file, 'r') as f:
                    stats = json.load(f)
                # Keep only last 7 days of data (keys are YYYYMMDD strings,
                # so lexicographic >= matches chronological order).
                seven_days_ago = (datetime.now() - timedelta(days=7)).strftime("%Y%m%d")
                stats = {k: v for k, v in stats.items() if k >= seven_days_ago}
                return stats
            except (OSError, json.JSONDecodeError):
                # FIX: was a bare `except:` that swallowed everything,
                # including KeyboardInterrupt; catch only file/parse errors.
                return {}
        return {}

    def save_daily_stats(self):
        """Persist today's totals into the rolling daily_stats.json file."""
        stats_file = os.path.join(self.log_dir, "daily_stats.json")
        date_str = datetime.now().strftime("%Y%m%d")
        self.daily_stats[date_str] = {
            "total_vehicles": self.vehicle_count,
            "records_count": len(self.vehicle_records),
            "date": datetime.now().strftime("%Y-%m-%d")
        }
        with open(stats_file, 'w') as f:
            json.dump(self.daily_stats, f, indent=2)

    def log_vehicle(self, vehicle_id, timestamp, position=None):
        """Log one vehicle detection to the text log, JSON file, and console.

        Args:
            vehicle_id: monotonically increasing counter value.
            timestamp: datetime of the detection.
            position: optional dict with x/y/w/h of the bounding box.

        Returns:
            The record dict appended to self.vehicle_records.
        """
        # Check if we need to switch to a new day's log file
        current_date = datetime.now().date()
        if current_date != self.current_log_date:
            self.current_log_date = current_date
            self.log_file = self.get_log_file_path()
            # FIX: start the new day with a fresh record list; previously
            # the old day's records leaked into the new day's JSON file.
            self.vehicle_records = []
            print(f"New day! Switching to log file: {self.log_file}")

        # Format timestamp with millisecond precision (strip microseconds)
        time_str = timestamp.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3]

        # Create record
        record = {
            "id": vehicle_id,
            "timestamp": time_str,
            "unix_time": timestamp.timestamp(),
            "date": timestamp.strftime("%Y-%m-%d"),
            "time": timestamp.strftime("%H:%M:%S"),
            "position": position
        }

        # Add to records list
        self.vehicle_records.append(record)

        # Append to the human-readable text log
        with open(self.log_file, 'a') as f:
            log_line = f"[{time_str}] Vehicle #{vehicle_id:04d} detected"
            if position:
                log_line += f" at position {position}"
            log_line += "\n"
            f.write(log_line)

        # Also rewrite the JSON sidecar file (for easy analysis)
        json_file = self.log_file.replace('.txt', '.json')
        with open(json_file, 'w') as f:
            json.dump(self.vehicle_records, f, indent=2)

        # Print to console
        print(f"[{timestamp.strftime('%H:%M:%S')}] 🚗 Vehicle #{vehicle_id:04d} detected")

        return record

    def detect_and_count(self, frame):
        """Detect moving vehicles in a BGR frame and count new ones.

        Returns:
            (detected_vehicles, fgmask) where detected_vehicles is a list of
            dicts with "bbox", "center", "area" and "record" (None when the
            contour was drawn but not counted), and fgmask is the cleaned
            foreground mask.
        """
        # Convert to grayscale
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

        # Apply background subtraction
        fgmask = self.fgbg.apply(gray)

        # Morphological close then open: fill holes, remove speckle noise
        kernel = np.ones((5, 5), np.uint8)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_CLOSE, kernel)
        fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel)

        # Find contours of the remaining motion blobs
        contours, _ = cv2.findContours(fgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        detected_vehicles = []

        for contour in contours:
            area = cv2.contourArea(contour)
            if area > 1000:  # Only process large enough areas
                x, y, w, h = cv2.boundingRect(contour)

                # Only focus on the bottom half of the 640x480 frame
                if y > 240 and w > 50 and h > 50:
                    # Calculate center point
                    center_x = x + w // 2
                    center_y = y + h // 2

                    # 2-second debounce to avoid counting one vehicle twice
                    current_time = time.time()
                    if current_time - self.last_count_time > 2.0:
                        self.vehicle_count += 1
                        self.last_count_time = current_time

                        # Log vehicle
                        timestamp = datetime.now()
                        position = {"x": center_x, "y": center_y, "w": w, "h": h}
                        record = self.log_vehicle(self.vehicle_count, timestamp, position)

                        detected_vehicles.append({
                            "bbox": (x, y, w, h),
                            "center": (center_x, center_y),
                            "area": area,
                            "record": record
                        })
                    else:
                        # Within debounce window: just draw, don't count
                        detected_vehicles.append({
                            "bbox": (x, y, w, h),
                            "center": (center_x, center_y),
                            "area": area,
                            "record": None
                        })

        return detected_vehicles, fgmask

    def draw_detection_info(self, frame, vehicles, fgmask):
        """Overlay boxes, IDs, the detection line, and stats on the frame."""
        # Draw detected vehicles
        for vehicle in vehicles:
            x, y, w, h = vehicle["bbox"]
            center_x, center_y = vehicle["center"]

            # Counted vehicles are bright green / thick; others dim / thin
            color = (0, 255, 0) if vehicle["record"] else (0, 200, 200)
            thickness = 2 if vehicle["record"] else 1
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, thickness)

            # Draw center point
            cv2.circle(frame, (center_x, center_y), 4, (0, 0, 255), -1)

            # If counted, show the assigned ID above the box
            if vehicle["record"]:
                cv2.putText(frame, f"#{vehicle['record']['id']}",
                            (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

        # Draw detection line
        detection_line_y = 320
        cv2.line(frame, (0, detection_line_y), (640, detection_line_y), (0, 255, 255), 2)
        cv2.putText(frame, "Detection Line", (10, detection_line_y - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)

        # Display statistics
        stats_y = 30
        cv2.putText(frame, f"Vehicles: {self.vehicle_count}", (10, stats_y),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)

        # Display FPS (refreshed every 30 frames)
        self.frame_count += 1
        if self.frame_count % 30 == 0:
            fps = self.frame_count / (time.time() - self.start_time)
            cv2.putText(frame, f"FPS: {fps:.1f}", (10, stats_y + 40),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 0), 2)

        # Display current time
        current_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        cv2.putText(frame, current_time, (350, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)

        # Display today's date
        date_str = datetime.now().strftime("%Y-%m-%d")
        cv2.putText(frame, f"Today: {date_str}", (350, 70),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 2)

        # Display log status
        log_status = f"Log: {os.path.basename(self.log_file)}"
        cv2.putText(frame, log_status, (10, stats_y + 80),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (200, 200, 255), 1)

        return frame

    def generate_daily_report(self):
        """Write a per-hour summary and detailed listing next to the log."""
        if not self.vehicle_records:
            return

        # Count by hour
        hourly_counts = {}
        for record in self.vehicle_records:
            hour = datetime.fromtimestamp(record["unix_time"]).strftime("%H:00")
            hourly_counts[hour] = hourly_counts.get(hour, 0) + 1

        # Generate report file
        report_file = self.log_file.replace('.txt', '_report.txt')
        with open(report_file, 'w') as f:
            f.write("=" * 60 + "\n")
            f.write(f"Daily Vehicle Detection Report\n")
            f.write(f"Date: {datetime.now().strftime('%Y-%m-%d')}\n")
            f.write("=" * 60 + "\n\n")

            f.write(f"Total Vehicles Detected: {self.vehicle_count}\n")
            f.write(f"Detection Start Time: {datetime.fromtimestamp(self.start_time).strftime('%Y-%m-%d %H:%M:%S')}\n")
            f.write(f"Report Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n")

            f.write("Hourly Statistics:\n")
            f.write("-" * 30 + "\n")
            for hour in sorted(hourly_counts.keys()):
                f.write(f"{hour}: {hourly_counts[hour]} vehicles\n")

            f.write("\nDetailed Records:\n")
            f.write("-" * 60 + "\n")
            for i, record in enumerate(self.vehicle_records, 1):
                f.write(f"{i:3d}. [{record['time']}] Vehicle #{record['id']:04d}\n")

        print(f"Daily report generated: {report_file}")

    def run(self):
        """Main detection loop: capture, detect, draw, handle hotkeys."""
        print("Vehicle Detection System Starting")
        print("Controls:")
        print("  'q' - Quit program")
        print("  'r' - Reset counter")
        print("  's' - Save current frame")
        print("  'p' - Generate daily report")
        print("  '+' - Increase sensitivity")
        print("  '-' - Decrease sensitivity")
        print("-" * 50)

        # Start camera
        self.picam2.start()
        time.sleep(1)  # Let camera stabilize

        try:
            while True:
                # Capture frame (RGB888 from Picamera2 -> BGR for OpenCV)
                frame = self.picam2.capture_array()
                frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)

                # Detect and count vehicles
                vehicles, fgmask = self.detect_and_count(frame)

                # Draw detection info
                frame = self.draw_detection_info(frame, vehicles, fgmask)

                # Display frames
                cv2.imshow("Vehicle Detector with Logger", frame)
                cv2.imshow("Motion Mask", fgmask)

                # Handle keyboard input
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    print("\nQuitting program")
                    break
                elif key == ord('r'):
                    self.vehicle_count = 0
                    self.vehicle_records.clear()
                    self.last_count_time = time.time()
                    self.fgbg = cv2.createBackgroundSubtractorMOG2(history=50, varThreshold=25)
                    print("Counter reset")
                elif key == ord('s'):
                    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                    filename = f"snapshot_{timestamp}.jpg"
                    cv2.imwrite(filename, frame)
                    # FIX: filename was computed but never printed
                    print(f"Frame saved: {filename}")
                elif key == ord('p'):
                    self.generate_daily_report()
                elif key == ord('+'):
                    # Increase sensitivity (lower threshold = more motion detected)
                    self.fgbg.setVarThreshold(max(10, self.fgbg.getVarThreshold() - 5))
                    print(f"Sensitivity increased: varThreshold={self.fgbg.getVarThreshold()}")
                elif key == ord('-'):
                    # Decrease sensitivity
                    self.fgbg.setVarThreshold(min(100, self.fgbg.getVarThreshold() + 5))
                    print(f"Sensitivity decreased: varThreshold={self.fgbg.getVarThreshold()}")

        except KeyboardInterrupt:
            print("\nUser interrupted")

        finally:
            # Cleanup resources
            self.picam2.stop()
            cv2.destroyAllWindows()

            # Save statistics
            self.save_daily_stats()

            # Generate final report
            self.generate_daily_report()

            # Output summary
            total_time = time.time() - self.start_time
            fps = self.frame_count / total_time if total_time > 0 else 0

            print("\n" + "=" * 60)
            print("Detection System Stopped")
            print("=" * 60)
            print(f"Total Runtime: {total_time:.1f} seconds")
            print(f"Frames Processed: {self.frame_count}")
            print(f"Average FPS: {fps:.1f}")
            print(f"Total Vehicles Detected: {self.vehicle_count}")
            print(f"Log File: {self.log_file}")
            print(f"JSON Data: {self.log_file.replace('.txt', '.json')}")
            print("=" * 60)


def main():
    """Main function."""
    print("=" * 60)
    print("Raspberry Pi Vehicle Detection System - With Logging")
    print("=" * 60)

    # Create detector and run
    detector = VehicleDetectorLogger()
    detector.run()


if __name__ == "__main__":
    main()
Showing posts with label Python. Show all posts
Showing posts with label Python. Show all posts
Sunday, February 15, 2026
Python: Detecting Vehicles in Real-Time Footage
Sunday, January 14, 2024
Python & ML: Tips
[1] __init__.
[2] 10 best annotation tools for computer vision applications. 2022-05-07
Free:
1. Make Sense: https://www.makesense.ai/
2. VGG Image Annotator: https://www.robots.ox.ac.uk/~vgg/software/via/
3. Computer Vision Annotator Tool (CVAT): https://github.com/openvinotoolkit/cvat
4. Labelme: http://labelme.csail.mit.edu/
5. Dash Doodler: https://github.com/Doodleverse/dash_doodler
6. LabelImg: https://github.com/tzutalin/labelImg
7. Label Studio: https://labelstud.io/
Paid:
8. LabelBox: https://labelbox.com/
9. Scale: https://scale.com/
10. Superannotate: https://www.superannotate.com/
[3] git.
[4] pip install -e git+https://github.com/tensorflow/examples.git#egg=TensorFlow-Examples
[5] tqdm derives from the Arabic word taqaddum (تقدّم), meaning "progress".
from tqdm import tqdm

# NOTE(review): this snippet assumes TensorFlow is imported elsewhere
# as `tf` (import tensorflow as tf) — confirm before running.

# create a list to store images
images=[]

# iterate over 1000 image paths, with a tqdm progress bar
for path in tqdm(image_path):
    # read file
    file=tf.io.read_file(path)
    # decode a png file to a tensor (3 channels, uint8)
    image=tf.image.decode_png(file, channels=3, dtype=tf.uint8)
    # append to the list
    images.append(image)
Wednesday, January 31, 2018
Python: OpenCV
Introduction
OpenCV (Open Source Computer Vision Library) is released under a BSD license and hence it’s free for both academic and commercial use. It has C++, C, Python and Java interfaces and supports Windows, Linux, Mac OS, iOS and Android. OpenCV was designed for computational efficiency and with a strong focus on real-time applications. Written in optimized C/C++, the library can take advantage of multi-core processing. Enabled with OpenCL, it can take advantage of the hardware acceleration of the underlying heterogeneous compute platform.
Installation
Python: Installing opencv.
Notes
[1] Color image loaded by OpenCV is in BGR mode. But Matplotlib displays in RGB mode.
References
[1] OpenCV official Site.
Friday, August 4, 2017
Python: ANACONDA Tips
Introduction
一款不错的Python IDE,来自ANACONDA(Mirror from Tsinghua)。双击启动安装程序。
修改字体参考Fig. 1。
Fig. 1
背景颜色配置参考这里,如图Fig. 2
Fig. 2
[2022-02-21]补充~
[2022-05-07]补充~
[2023-10-10]Addendum~
Install GDAL
How to Install PyTorch in Anaconda with Conda or Pip. 2023-06-07.
1 2 3 4 | conda install -c conda-forge earthengine-api conda install -c "conda-forge/label/cf201901" earthengine-api conda install -c "conda-forge/label/cf202003" earthengine-api conda install -c "conda-forge/label/gcc7" earthengine-api |
Saturday, July 22, 2017
Python: Installing opencv
依照环境配置选择安装文件,这里环境是64 bit与Python 2.7.3,所以应选的安装文件名称是opencv_python-2.4.13.2-cp27-cp27m-win_amd64.whl。CMD的当前目录调整至安装文件所在文件夹下,键入如下命令:
1 | pip install opencv_python-2.4.13.2-cp27-cp27m-win_amd64.whl |
安装完成后,在Python Shell输入import cv2,无返回错误即表示安装成功。
Anaconda Spyder
似乎为Python IDLE (Python GUI)安装的opencv并不可以直接在Anaconda Spyder上启动。
如何为Anaconda Spyder安装额外的Package?Installing packages。这里需要运行Anaconda Prompt。
在Anaconda Prompt直接运行conda install opencv,返回如下,它提示python版本大于等于3.5(小于3.6):
可是本地的Spyder是2.7版,显然这些package都不可以安装。
转到Anaconda Cloud,搜索opencv,其中opencv(2.4.11)版本对应python2.7(Win64bit).
单击进入opencv(2.4.11)页面,在Anaconda Prompt键入如下命令:
如下,自动运行安装,可能提示需要升级部分依赖的package,Y就好。
安装结束后,在Spyder键入运行,输出2.4.11即是安装成功。
References
Python: Installing scikit-image
依照环境配置选择安装文件,这里环境是64 bit与Python 2.7.3,所以应选的安装文件名称是scikit_image-0.13.0-cp27-cp27m-win_amd64.whl。CMD的当前目录调整至安装文件所在文件夹下,键入如下命令:
1 | pip install scikit_image-0.13.0-cp27-cp27m-win_amd64.whl |
安装完成的提示界面如下。
在Python Shell输入import skimage,无返回错误即表示安装成功。
References
[2] scikit-image GitHub.
Friday, November 25, 2016
Python: pip uninstall
Introduction
卸载功能:pip,它几乎可以卸载任何Module,但有两种例外:如此python setup.py install安装或如此python setup.py develop安装。
卸载过程,运行CMD,输入如Fig.1:
Fig. 1
如果希望避免再次确认卸载,则多添加-y,如下:
Fig. 2
References
[1] pip uninstall.
Saturday, October 15, 2016
Python+Arcpy: Points To Line
Introduction
Python+Arcpy操作Points(.shp)转换至Polyline(.shp),仔细研读Points To Line (Data Management)说明,参数说明如下:
Input_Features: The point features to be converted into lines.
Output_Feature_Class:The line feature class which will be created from the input points.
以下参数对生成闭合Polyline尤为重要(Optional)
Line_Field: Each feature in the output will be based on unique values in the Line Field.这里指出同一直线的各点应在某一Field下具有相同的数值,见Fig 1红框之内在Line1字段之下数值均为1,表示各点均在同一条Polyline。
Fig. 1
Sort_Field: By default, points used to create each output line feature will be used in the order they are found. If a different order is desired, specify a Sort Field.
Close_Line: Specifies whether output line features should be closed. True or False.
举一个例子,从含有4个point的.shp转换为1条闭合的Polyline(.shp)文件,转换输出如Fig 2。
Fig. 2
注意,输入文件需使用绝对路径,起初使用相对路径返回错误指出无此文件。
Tuesday, May 3, 2016
GDAL: Clip Rasters
Introduction
GDAL利用shapefile文件裁剪栅格影像,并配置裁剪边界之外的背景数值。注意shapefile文件与栅格影像具有完全一致的投影信息。
Example
% Created by LI Xu
% Version 1.0
% April 19, 2016
% Description:
%    Clip rasters from a shapefile boundary.
%    Each *.tif in SouDir is clipped to the shapefile cutline with
%    gdalwarp and written under DesDir with the same filename.
%    NOTE(review): requires gdalwarp on the PATH and MATLAB's Python
%    interface (py.os.system) — confirm both are available.
% If you have any question about this code,
% please do not hesitate to contact me via E-mail:
%    jeremy456@163.com
% Blog:
%    http://blog.sciencenet.cn/u/lixujeremy
%    http://lixuworld.blogspot.com/

clear; clc;

% Source Directory (input rasters)
SouDir='input';

% Destination Directory (clipped outputs)
DesDir='output';

% All Input Files
files=dir(fullfile(SouDir, '*.tif'));

% Vector File (cutline; must share the rasters' projection)
vecpath='shapefile.shp';
% Layer name for gdalwarp -cl is the shapefile basename
[~, layername, ~]=fileparts(vecpath);

% Output No Data value written outside the cutline
NoData=-2;

% Loop over every raster and shell out to gdalwarp
for ii=1:length(files)

    filename=files(ii).name;
    filepath=fullfile(SouDir, filename);
    otpath=fullfile(DesDir, filename);

    % gdalwarp -of GTiff -cutline INPUT.shp -cl shapefile -crop_to_cutline INPUT.tif OUTPUT.tif
    strcmd=['gdalwarp -of GTiff -cutline ', vecpath, ' -cl ', layername, ' -crop_to_cutline '];
    strcmd=[strcmd, '-dstnodata ' num2str(NoData),' ', filepath, ' ', otpath];

    % Run the command through Python's os.system
    py.os.system(strcmd);

    disp([num2str(ii), ':', filename]);

end
disp('************************************************');
References
[1] gdalwarp home page.
Subscribe to:
Comments (Atom)















