"""On-device visual-inertial odometry demo.

Feeds a stereo camera pair plus raw IMU samples through StereoDepth,
FeatureTracker and RTABMapVIO, then forwards the rectified image and the
estimated pose transform to a Rerun viewer node.
"""
import time

import depthai as dai

from rerun_node import RerunNode

CAPTURE_FPS = 30
CAPTURE_WIDTH = 640
CAPTURE_HEIGHT = 400

with dai.Pipeline() as pipeline:
    # --- sources and processing nodes ----------------------------------
    camLeft = pipeline.create(dai.node.Camera).build(
        dai.CameraBoardSocket.CAM_B, sensorFps=CAPTURE_FPS
    )
    camRight = pipeline.create(dai.node.Camera).build(
        dai.CameraBoardSocket.CAM_C, sensorFps=CAPTURE_FPS
    )
    imuNode = pipeline.create(dai.node.IMU)
    depthNode = pipeline.create(dai.node.StereoDepth)
    tracker = pipeline.create(dai.node.FeatureTracker)
    vio = pipeline.create(dai.node.RTABMapVIO)
    viewer = RerunNode()

    # --- IMU: raw accelerometer + gyroscope at 200 Hz, low-latency batches
    imuNode.enableIMUSensor(
        [dai.IMUSensor.ACCELEROMETER_RAW, dai.IMUSensor.GYROSCOPE_RAW], 200
    )
    imuNode.setBatchReportThreshold(1)
    imuNode.setMaxBatchReports(10)

    # --- feature tracker: Harris corners, motion estimator disabled ----
    tracker.setHardwareResources(1, 2)
    tracker.initialConfig.setCornerDetector(
        dai.FeatureTrackerConfig.CornerDetector.Type.HARRIS
    )
    tracker.initialConfig.setNumTargetFeatures(1000)
    tracker.initialConfig.setMotionEstimator(False)
    tracker.initialConfig.FeatureMaintainer.minimumDistanceBetweenFeatures = 49

    # --- stereo depth, aligned to the left camera socket ---------------
    depthNode.setExtendedDisparity(False)
    depthNode.setLeftRightCheck(True)
    depthNode.setRectifyEdgeFillColor(0)
    depthNode.enableDistortionCorrection(True)
    depthNode.initialConfig.setLeftRightCheckThreshold(10)
    depthNode.setDepthAlign(dai.CameraBoardSocket.CAM_B)

    # --- wiring ---------------------------------------------------------
    camLeft.requestOutput((CAPTURE_WIDTH, CAPTURE_HEIGHT)).link(depthNode.left)
    camRight.requestOutput((CAPTURE_WIDTH, CAPTURE_HEIGHT)).link(depthNode.right)
    depthNode.rectifiedLeft.link(tracker.inputImage)
    tracker.passthroughInputImage.link(vio.rect)
    depthNode.depth.link(vio.depth)
    tracker.outputFeatures.link(vio.features)
    imuNode.out.link(vio.imu)
    vio.passthroughRect.link(viewer.inputImg)
    vio.transform.link(viewer.inputTrans)

    # --- run until the device stops the pipeline ------------------------
    pipeline.start()
    while pipeline.isRunning():
        time.sleep(1)