#!/usr/bin/env python3

import cv2
import depthai as dai
import numpy as np

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
camRgb = pipeline.create(dai.node.ColorCamera)
monoLeft = pipeline.create(dai.node.MonoCamera)
monoRight = pipeline.create(dai.node.MonoCamera)
edgeDetectorLeft = pipeline.create(dai.node.EdgeDetector)
edgeDetectorRight = pipeline.create(dai.node.EdgeDetector)
edgeDetectorRgb = pipeline.create(dai.node.EdgeDetector)

xoutEdgeLeft = pipeline.create(dai.node.XLinkOut)
xoutEdgeRight = pipeline.create(dai.node.XLinkOut)
xoutEdgeRgb = pipeline.create(dai.node.XLinkOut)
xinEdgeCfg = pipeline.create(dai.node.XLinkIn)

edgeLeftStr = "edge left"
edgeRightStr = "edge right"
edgeRgbStr = "edge rgb"
edgeCfgStr = "edge cfg"

xoutEdgeLeft.setStreamName(edgeLeftStr)
xoutEdgeRight.setStreamName(edgeRightStr)
xoutEdgeRgb.setStreamName(edgeRgbStr)
xinEdgeCfg.setStreamName(edgeCfgStr)

# Properties
camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A)
camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)

monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoLeft.setCamera("left")
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
monoRight.setCamera("right")

# The RGB edge detector needs a larger output buffer than the default (1080p frame)
edgeDetectorRgb.setMaxOutputFrameSize(camRgb.getVideoWidth() * camRgb.getVideoHeight())

# Linking
monoLeft.out.link(edgeDetectorLeft.inputImage)
monoRight.out.link(edgeDetectorRight.inputImage)
camRgb.video.link(edgeDetectorRgb.inputImage)

edgeDetectorLeft.outputImage.link(xoutEdgeLeft.input)
edgeDetectorRight.outputImage.link(xoutEdgeRight.input)
edgeDetectorRgb.outputImage.link(xoutEdgeRgb.input)

# A single config input stream reconfigures all three edge detectors at once
xinEdgeCfg.out.link(edgeDetectorLeft.inputConfig)
xinEdgeCfg.out.link(edgeDetectorRight.inputConfig)
xinEdgeCfg.out.link(edgeDetectorRgb.inputConfig)

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output/input queues
    edgeLeftQueue = device.getOutputQueue(edgeLeftStr, 8, False)
    edgeRightQueue = device.getOutputQueue(edgeRightStr, 8, False)
    edgeRgbQueue = device.getOutputQueue(edgeRgbStr, 8, False)
    edgeCfgQueue = device.getInputQueue(edgeCfgStr)

    print("Switch between Sobel filter kernels using keys '1' and '2'")

    while True:
        edgeLeft = edgeLeftQueue.get()
        edgeRight = edgeRightQueue.get()
        edgeRgb = edgeRgbQueue.get()

        edgeLeftFrame = edgeLeft.getFrame()
        edgeRightFrame = edgeRight.getFrame()
        edgeRgbFrame = edgeRgb.getFrame()

        # Show the frames
        cv2.imshow(edgeLeftStr, edgeLeftFrame)
        cv2.imshow(edgeRightStr, edgeRightFrame)
        cv2.imshow(edgeRgbStr, edgeRgbFrame)

        key = cv2.waitKey(1)
        if key == ord('q'):
            break

        if key == ord('1'):
            # Standard 3x3 Sobel kernels
            print("Switching Sobel filter kernel.")
            cfg = dai.EdgeDetectorConfig()
            sobelHorizontalKernel = [[1, 0, -1], [2, 0, -2], [1, 0, -1]]
            sobelVerticalKernel = [[1, 2, 1], [0, 0, 0], [-1, -2, -1]]
            cfg.setSobelFilterKernels(sobelHorizontalKernel, sobelVerticalKernel)
            edgeCfgQueue.send(cfg)

        if key == ord('2'):
            # 3x3 Scharr kernels (stronger weighting of the center row/column)
            print("Switching Sobel filter kernel.")
            cfg = dai.EdgeDetectorConfig()
            sobelHorizontalKernel = [[3, 0, -3], [10, 0, -10], [3, 0, -3]]
            sobelVerticalKernel = [[3, 10, 3], [0, 0, 0], [-3, -10, -3]]
            cfg.setSobelFilterKernels(sobelHorizontalKernel, sobelVerticalKernel)
            edgeCfgQueue.send(cfg)