Getting started with DepthAI SDK
In this tutorial, we’ll show you how to use the DepthAI SDK for a couple of basic use cases, which should give you an overall idea of how it works and when it might be useful.
What is DepthAI SDK?
The DepthAI SDK was created on top of the regular DepthAI API. Originally it was part of the demo script, but over time it evolved into a package containing many convenience methods and classes that aim to help with the development process for OAK cameras.
The package is mainly made of managers, which handle different aspects of development, along with a few general-purpose helpers:
PipelineManager | Helps in setting up the processing pipeline
NNetManager | Helps in setting up neural networks
PreviewManager | Helps in displaying previews from OAK cameras
EncodingManager | Helps in creating videos from OAK cameras
BlobManager | Helps in downloading neural networks as MyriadX blobs
FPSHandler | For FPS calculations
Previews | For frame handling
utils | For various most-common tasks
In some places, the code is also open to customization - e.g. you can set up a custom handler file for a neural network or pass a callback argument to a function to perform additional processing.
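For orientation, the snippet below shows where these classes can be imported from. It is a minimal sketch based on the cookbook examples further down and on the package layout, so the exact import paths (in particular for EncodingManager and FPSHandler, which are not used in the examples) should be verified against your installed SDK version.

# Assumed import paths for the helpers listed above (verify against your SDK version)
from depthai_sdk import Previews, FPSHandler
from depthai_sdk.managers import (
    PipelineManager, NNetManager, PreviewManager, EncodingManager, BlobManager
)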
Example usages
The original “user” of this SDK was the demo script, which shows how the SDK is used in practice. Other projects built with the SDK can also serve as a reference.
Installation
To install this package, run the following command in your terminal window:
python3 -m pip install depthai-sdk
Warning
If you’re using a Raspberry Pi, adding the piwheels extra package index URL can significantly speed up the installation by providing prebuilt binaries for OpenCV:
python3 -m pip install --extra-index-url https://www.piwheels.org/simple/ depthai-sdk
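Once installed, a quick import check can confirm that both the SDK and the underlying depthai library are available. This is just a hypothetical sanity-check snippet, not part of the SDK itself:

# Both imports should succeed after a successful installation
import depthai as dai
import depthai_sdk

print("depthai version:", dai.__version__)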
Learn more
To see more details and examples of how each manager class works, we have created a few simple tutorials.
For more in-depth information about the classes and methods, please visit the DepthAI SDK API reference.
Cookbook
Below you can find various basic usage examples of the DepthAI SDK that can be used as a starting point.
Preview color camera
from depthai_sdk import Previews
from depthai_sdk.managers import PipelineManager, PreviewManager
import depthai as dai
import cv2

# Create a pipeline with a color camera and an XLinkOut stream for its preview
pm = PipelineManager()
pm.createColorCam(xout=True)

with dai.Device(pm.pipeline) as device:
    # Set up the preview window and the device output queues
    pv = PreviewManager(display=[Previews.color.name])
    pv.createQueues(device)

    while True:
        # Fetch the latest frames and display them
        pv.prepareFrames()
        pv.showFrames()

        if cv2.waitKey(1) == ord('q'):
            break
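If you also want to know how fast frames are arriving, the FPSHandler helper from the table above can be dropped into the same loop. The sketch below is an assumption-based variation of the example above: it presumes FPSHandler exposes nextIter() and fps(), as used by the demo script, so treat it as a starting point rather than a definitive reference.

from depthai_sdk import Previews, FPSHandler
from depthai_sdk.managers import PipelineManager, PreviewManager
import depthai as dai
import cv2

pm = PipelineManager()
pm.createColorCam(xout=True)

fps = FPSHandler()  # assumed helper for FPS calculations

with dai.Device(pm.pipeline) as device:
    pv = PreviewManager(display=[Previews.color.name])
    pv.createQueues(device)

    while True:
        pv.prepareFrames()
        pv.showFrames()
        fps.nextIter()  # count one displayed iteration (assumed method name)
        print("FPS: {:.2f}".format(fps.fps()), end="\r")  # assumed method name

        if cv2.waitKey(1) == ord('q'):
            break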
Preview color and mono cameras
from depthai_sdk import Previews
from depthai_sdk.managers import PipelineManager, PreviewManager
import depthai as dai
import cv2

# Create a pipeline with the color camera and both mono (left/right) cameras
pm = PipelineManager()
pm.createColorCam(xout=True)
pm.createLeftCam(xout=True)
pm.createRightCam(xout=True)

with dai.Device(pm.pipeline) as device:
    # Display all three previews
    pv = PreviewManager(display=[Previews.color.name, Previews.left.name, Previews.right.name])
    pv.createQueues(device)

    while True:
        pv.prepareFrames()
        pv.showFrames()

        if cv2.waitKey(1) == ord('q'):
            break
Run MobilenetSSD on color camera
from depthai_sdk import Previews
from depthai_sdk.managers import PipelineManager, PreviewManager, NNetManager, BlobManager
import depthai as dai
import cv2

pm = PipelineManager()
pm.createColorCam(xout=True)

# Download the mobilenet-ssd blob from the model zoo and create the NN node,
# using the color camera preview as its input
bm = BlobManager(zooName="mobilenet-ssd")
nm = NNetManager(inputSize=(300, 300), nnFamily="mobilenet")
nn = nm.createNN(pipeline=pm.pipeline, nodes=pm.nodes, source=Previews.color.name,
                 blobPath=bm.getBlob(shaves=6, openvinoVersion=pm.pipeline.getOpenVINOVersion()))
pm.addNn(nn)

with dai.Device(pm.pipeline) as device:
    pv = PreviewManager(display=[Previews.color.name])
    pv.createQueues(device)
    nm.createQueues(device)
    nnData = []

    while True:
        pv.prepareFrames()
        inNn = nm.outputQueue.tryGet()

        if inNn is not None:
            # Decode the raw NN output into detections
            nnData = nm.decode(inNn)

        # Draw the detections on top of the preview and display it
        nm.draw(pv, nnData)
        pv.showFrames()

        if cv2.waitKey(1) == ord('q'):
            break
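If you want to use the detections directly instead of only drawing them, the decoded results for the "mobilenet" family are standard DepthAI ImgDetection objects (this is an assumption based on how the demo script consumes them). Inside the if inNn is not None: block you could then add something like:

# Assumes nm.decode() returns a list of dai.ImgDetection for the "mobilenet" family
for det in nnData:
    # Bounding box coordinates are normalized to the 0..1 range
    print("label id: {}, confidence: {:.2f}, bbox: ({:.2f}, {:.2f}, {:.2f}, {:.2f})".format(
        det.label, det.confidence, det.xmin, det.ymin, det.xmax, det.ymax))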
Run face-detection-retail-0004 on left camera
from depthai_sdk import Previews
from depthai_sdk.managers import PipelineManager, PreviewManager, NNetManager, BlobManager
import depthai as dai
import cv2

# Use the left mono camera as the frame source
pm = PipelineManager()
pm.createLeftCam(xout=True)

# Download the face-detection-retail-0004 blob and attach the NN to the left camera stream
bm = BlobManager(zooName="face-detection-retail-0004")
nm = NNetManager(inputSize=(300, 300), nnFamily="mobilenet")
nn = nm.createNN(pipeline=pm.pipeline, nodes=pm.nodes, source=Previews.left.name,
                 blobPath=bm.getBlob(shaves=6, openvinoVersion=pm.pipeline.getOpenVINOVersion()))
pm.addNn(nn)

with dai.Device(pm.pipeline) as device:
    pv = PreviewManager(display=[Previews.left.name])
    pv.createQueues(device)
    nm.createQueues(device)
    nnData = []

    while True:
        pv.prepareFrames()
        inNn = nm.outputQueue.tryGet()

        if inNn is not None:
            nnData = nm.decode(inNn)

        nm.draw(pv, nnData)
        pv.showFrames()

        if cv2.waitKey(1) == ord('q'):
            break
Got questions?
We’re always happy to help with code or other questions you might have.