I have finally managed it; here's the basis of it...
Full version at https://github.com/Gordon999/Pi_Hailo_Wildlife_3
Code:
#!/usr/bin/env python3
"""Wildlife camera: Hailo object detection with circular-buffer video capture.

Based on the picamera2 example module for Hailo detection.  The low-resolution
camera stream is fed through the Hailo accelerator; when one of the object
classes in `objects` is detected, a still image is saved and an .mp4 clip is
recorded, including `pre_frames` seconds of pre-detection footage held in a
circular buffer.
"""

import argparse
import datetime
import os
import time

import cv2
from libcamera import controls  # noqa: F401 -- kept from original; unused here
from picamera2 import MappedArray, Picamera2, Preview
from picamera2.devices import Hailo
from picamera2.encoders import H264Encoder
from picamera2.outputs import CircularOutput2, PyavOutput

# Object class names (must match entries in the labels file) that trigger a recording.
objects = ["clock", "person"]

# Tunables.
v_width = 1456        # video width (pixels)
v_height = 1088       # video height (pixels)
v_length = 5          # seconds, minimum video length after the last detection
pre_frames = 5        # seconds, length of the pre-detection circular buffer
fps = 25              # video frame rate
show_detects = 1      # show detections on video, 1 = yes, 0 = no

# Paths derived from the logged-in user.
# (Original built this via a single-element `Users` list; simplified.)
user = os.getlogin()
h_user = "/home/" + user


def extract_detections(hailo_output, w, h, class_names, threshold=0.5):
    """Extract detections from the HailoRT-postprocess output.

    Parameters:
        hailo_output: per-class lists of detections, each detection being
            (y0, x0, y1, x1, score) with normalised coordinates.
        w, h: target frame width/height used to scale the boxes to pixels.
        class_names: list mapping class index -> human-readable name.
        threshold: minimum score for a detection to be kept.

    Returns:
        A list of [class_name, (x0, y0, x1, y1), score] entries.
    """
    results = []
    for class_id, class_detections in enumerate(hailo_output):
        for detection in class_detections:
            score = detection[4]
            if score >= threshold:
                # Hailo reports normalised (y0, x0, y1, x1); scale to pixels.
                y0, x0, y1, x1 = detection[:4]
                bbox = (int(x0 * w), int(y0 * h), int(x1 * w), int(y1 * h))
                results.append([class_names[class_id], bbox, score])
    return results


def draw_objects(request):
    """Picamera2 pre_callback: overlay the current detections on the main stream."""
    # Snapshot the module-level `detections` list, which the main loop rebinds
    # on every frame, so the overlay is consistent within one callback.
    current_detections = detections
    if current_detections and show_detects == 1:
        with MappedArray(request, "main") as m:
            for class_name, bbox, score in current_detections:
                x0, y0, x1, y1 = bbox
                label = f"{class_name} %{int(score * 100)}"
                cv2.rectangle(m.array, (x0, y0), (x1, y1), (0, 255, 0, 0), 4)
                cv2.putText(m.array, label, (x0 + 5, y0 + 45),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 255, 0, 0), 3,
                            cv2.LINE_AA)


if __name__ == "__main__":
    # Parse command-line arguments.
    parser = argparse.ArgumentParser(description="Detection Example")
    parser.add_argument("-m", "--model", help="Path for the HEF model.",
                        default="/usr/share/hailo-models/yolov8s_h8l.hef")
    parser.add_argument("-l", "--labels",
                        default="/home/" + user + "/picamera2/examples/hailo/coco.txt",
                        help="Path to a text file containing labels.")
    parser.add_argument("-s", "--score_thresh", type=float, default=0.5,
                        help="Score threshold, must be a float between 0 and 1.")
    args = parser.parse_args()

    # Get the Hailo model, the input size it wants, and the size of our preview stream.
    with Hailo(args.model) as hailo:
        model_h, model_w, _ = hailo.get_input_shape()
        video_w, video_h = v_width, v_height

        # Load class names from the labels file.
        with open(args.labels, 'r', encoding="utf-8") as f:
            class_names = f.read().splitlines()

        # The list of detected objects to draw (read by draw_objects).
        detections = None

        # Configure and start Picamera2: full-size main stream for recording,
        # model-sized lores stream for inference.
        main = {'size': (video_w, video_h), 'format': 'YUV420'}
        lores = {'size': (model_w, model_h), 'format': 'RGB888'}
        controls2 = {'FrameRate': fps}
        picam2 = Picamera2()
        config = picam2.create_preview_configuration(main, lores=lores,
                                                     controls=controls2)
        picam2.configure(config)

        encoder = H264Encoder(2000000)
        # BUGFIX: buffer length was hard-coded to 5000 ms, ignoring the
        # `pre_frames` setting it is documented to follow; derive it instead.
        circular = CircularOutput2(buffer_duration_ms=pre_frames * 1000)
        picam2.start_preview(Preview.QTGL, x=0, y=0, width=480, height=480)
        picam2.start_recording(encoder, circular)
        encoding = False
        if show_detects == 1:
            picam2.pre_callback = draw_objects

        # Process each low resolution camera frame.
        while True:
            # Capture frame and run inference on it.
            frame = picam2.capture_array('lores')
            results = hailo.run(frame)

            # Extract detections from the inference results.
            detections = extract_detections(results, video_w, video_h,
                                            class_names, args.score_thresh)

            # Start (or extend) a recording when the top detection is one of
            # the tracked objects.  (Original looped over `objects` testing
            # the same top detection; a membership test is equivalent.)
            if detections:
                obj = detections[0][0]
                value = float(detections[0][2])
                if args.score_thresh < value < 1 and obj in objects:
                    startrec = time.monotonic()   # restart the stop timer
                    if not encoding:
                        now = datetime.datetime.now()
                        timestamp = now.strftime("%y%m%d_%H%M%S")
                        circular.open_output(
                            PyavOutput(h_user + "/Videos/" + timestamp + ".mp4"))
                        encoding = True
                        print("New Detection", timestamp, obj, timestamp + ".mp4")
                        # Save a low-resolution still of the triggering frame.
                        cv2.imwrite(h_user + "/Pictures/" + str(timestamp) + ".jpg",
                                    frame)

            # Stop recording v_length seconds after the last detection.
            if encoding and (time.monotonic() - startrec > v_length):
                now = datetime.datetime.now()
                timestamp2 = now.strftime("%y%m%d_%H%M%S")
                print("Stopped Record", timestamp2)
                circular.close_output()
                encoding = False

# Posted by gordon77 -- Mon Mar 17, 2025 9:00 am