---
# Frigate NVR configuration.
# Fill in the rtsp://user:pass@host placeholders before use — they are
# intentionally blank (rtsp://:@:554/...) in this template.

mqtt:
  enabled: false  # Set to true and add host/user/pass if you use Home Assistant

# go2rtc restreams each camera once so Frigate's record/detect inputs
# don't open extra connections to the camera itself.
go2rtc:
  streams:
    camera_1:
      - "ffmpeg:rtsp://:@:554/stream1#video=copy#audio=copy#audio=aac"
    camera_1_sub:
      - "rtsp://:@:554/stream2"
    camera_2:
      - "ffmpeg:rtsp://:@:554/stream1#video=copy#audio=copy#audio=aac"
    camera_2_sub:
      - "rtsp://:@:554/stream2"
    camera_3:
      - "ffmpeg:rtsp://:@:554/stream1#video=copy#audio=copy#audio=aac"
    camera_3_sub:
      - "rtsp://:@:554/stream2"

cameras:
  camera_1:  # Change name to match your location (e.g., driveway)
    ffmpeg:
      inputs:
        # Full-resolution main stream (stream1) is recorded; the low-res
        # sub stream (stream2) is used for detection to save CPU.
        - path: "rtsp://127.0.0.1:8554/camera_1"
          roles:
            - record
        - path: "rtsp://127.0.0.1:8554/camera_1_sub"
          roles:
            - detect
    detect:
      width: 640  # stream2 is typically 640x360
      height: 360
      fps: 5

  camera_2:
    ffmpeg:
      inputs:
        - path: "rtsp://127.0.0.1:8554/camera_2"
          roles:
            - record
        - path: "rtsp://127.0.0.1:8554/camera_2_sub"
          roles:
            - detect
    detect:
      width: 640
      height: 360
      fps: 5

  camera_3:
    ffmpeg:
      inputs:
        - path: "rtsp://127.0.0.1:8554/camera_3"
          roles:
            - record
        - path: "rtsp://127.0.0.1:8554/camera_3_sub"
          roles:
            - detect
    detect:
      width: 640
      height: 360
      fps: 5

# Optional: Global recording settings
record:
  enabled: true
  retain:
    days: 7
    mode: all

ffmpeg:
  # NOTE(review): preset-nvidia enables Nvidia (NVDEC) decoding, but the
  # detector below is OpenVINO on an Intel iGPU — confirm which GPU this
  # host actually has and pick the matching preset (e.g. preset-vaapi for
  # Intel, preset-nvidia for Nvidia).
  hwaccel_args: preset-nvidia

# If you want to use the GPU for object detection (requires Frigate + TensorRT)
# Note: This is separate from decoding and requires the tensorrt detector image
detectors:
  ov:
    type: openvino
    device: GPU  # This uses Intel iGPU if available, use 'tensorrt' for Nvidia