Skip to content
Merged
Show file tree
Hide file tree
Changes from 19 commits
Commits
Show all changes
70 commits
Select commit Hold shift + click to select a range
017beae
Initial pointcloud implementation
Jun 15, 2023
9c6a5ce
Merge remote-tracking branch 'origin/develop' into HEAD
Jun 15, 2023
70b821a
Update core
Jun 15, 2023
be16a68
Merge remote-tracking branch 'origin/develop' into HEAD
Jul 5, 2023
855e020
Merge pull request #939 from luxonis/main
moratom Dec 14, 2023
a6963c9
Merge branch 'develop' of github.com:luxonis/depthai-python into poin…
asahtik Dec 20, 2023
1da4317
Fixed large buffer processing in MessageGroups
asahtik Jan 8, 2024
fbb9e3d
Added PointCloudDataBindings
asahtik Jan 8, 2024
4516699
Bump core
asahtik Jan 8, 2024
ac9ea49
Merge pull request #945 from luxonis/msg_grp_bugfix
asahtik Jan 10, 2024
369c01a
clangformat
asahtik Jan 10, 2024
0517a4d
Run HIL tests after all wheels are available
jakgra Jan 10, 2024
4ff848c
Make notify hil workflow depend only on builds it needs
jakgra Jan 15, 2024
845880b
Bugfixes, added python pointcloud visualization example with open3d
asahtik Jan 15, 2024
50a6485
Update core
Jan 15, 2024
8545d25
Update core
Jan 15, 2024
8c1a729
Merge pull request #956 from luxonis/bootloader_watchdog_bugfix
moratom Jan 15, 2024
4802dfa
Merge pull request #950 from luxonis/run_hil_workflow_after_build
jakgra Jan 16, 2024
54e8575
Added rgb pointcloud capability
asahtik Jan 17, 2024
c21f783
Revert "Added rgb pointcloud capability"
asahtik Jan 18, 2024
fba5559
Bump core
asahtik Jan 18, 2024
e357a76
Revert shared
asahtik Jan 22, 2024
d1d3754
Bump core
asahtik Jan 22, 2024
dbb1d9a
Sparse pointcloud bindings
asahtik Jan 22, 2024
c6c2032
Bump core
asahtik Jan 22, 2024
72b0b72
[RVC2] Added getStereoPairs and getAvailableStereoPairs API (#959)
zrezke Jan 24, 2024
30630ef
Added c++ tests for pointcloud
asahtik Jan 26, 2024
bea383e
Add intensity output
whoactuallycares Jan 31, 2024
adbd8c7
Bump core
asahtik Feb 1, 2024
36abd80
Merge pull request #967 from luxonis/tof_intensity
whoactuallycares Feb 2, 2024
cbe0c76
Added ability to set the lens position via a float, to enable a more …
zrezke Feb 5, 2024
2a4bff2
Update depthai-core
whoactuallycares Feb 9, 2024
ba29e0a
Merge pull request #977 from luxonis/tof_intensity
moratom Feb 9, 2024
99fca06
FW: fix CAM_C failing to stream in certain cases,
alex-luxonis Feb 13, 2024
c682f16
Added the initial version of the depthai binary. (#979)
zrezke Feb 15, 2024
c014e27
OAK-T Support (#957)
zrezke Feb 20, 2024
53601fb
Merge branch 'develop' of github.com:luxonis/depthai-python into poin…
asahtik Feb 21, 2024
393aa6d
Example fixes & improvements, bump fw
asahtik Feb 22, 2024
57c36aa
Clangformat
asahtik Feb 22, 2024
9f67ed8
Bugfix
asahtik Feb 22, 2024
8b8a4da
Default cam test (#845)
zrezke Feb 25, 2024
ebca0f2
Reduce amount of pointcloud copying
asahtik Feb 26, 2024
ee001db
Make cam_test.py available through the depthai binary. (#983)
zrezke Feb 26, 2024
b592eeb
Fix tests
asahtik Feb 26, 2024
4af09fd
Add colorized example for pointcloud
Feb 26, 2024
8bfa675
Implemented transformation matrix for pointcloud
asahtik Feb 28, 2024
daa90fb
Merge branch 'pointcloud' of github.com:luxonis/depthai-python into p…
asahtik Feb 28, 2024
1d8d790
Merge pull request #961 from luxonis/pointcloud
asahtik Feb 29, 2024
5248efe
Fixed windows build
asahtik Mar 4, 2024
3dcae95
Optimized getPoints
asahtik Mar 4, 2024
42c3a2d
Minor fixes
asahtik Mar 4, 2024
6a26eee
Enhance camera features (#987)
zrezke Mar 4, 2024
00f0d32
FW: -fix `setAutoExposureLimit` flicker during AF lens move,
alex-luxonis Mar 4, 2024
9cb7719
Merge pull request #989 from luxonis/pcl_fix_build
moratom Mar 4, 2024
e37db4f
Improve the pointcloud example to work with 30FPS
Mar 5, 2024
b505473
Change the example to camel case for consistency
Mar 5, 2024
50e3683
Minor fixes
Mar 5, 2024
ae4fa01
Merge pull request #991 from luxonis/improve_pointcloud_example
moratom Mar 5, 2024
7b33b42
Merge pull request #990 from luxonis/main
moratom Mar 5, 2024
37f6553
Print a nice error if Open3D is not installed
Mar 5, 2024
11b925f
Update the handling for uninstalled open3d
Mar 5, 2024
a2e419e
Merge pull request #992 from luxonis/improve_pointcloud_example
moratom Mar 5, 2024
e13e231
Bump version to 2.25.0.0
Mar 6, 2024
d3917a7
Add manylinux wheels for 32 bit arm
Mar 6, 2024
a03c585
Add only the wheel for bookworm
Mar 6, 2024
8402008
Don't install global packages
Mar 6, 2024
5a8a85d
Add missing PCL API & change the RH system
Mar 7, 2024
a579364
Update FW to develop
Mar 8, 2024
2b24633
Merge remote-tracking branch 'origin/main' into release_v2.25.0.0
Mar 8, 2024
0465260
Update core to main
Mar 8, 2024
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 3 additions & 3 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -556,8 +556,8 @@ jobs:
# event-type: python-hil-event
# client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}"}'

notify_hil_workflow_linux_x86_64:
needs: [build-linux-x86_64]
notify_hil_workflow:
needs: [build-linux-armhf, build-linux-x86_64]
runs-on: ubuntu-latest
steps:
- name: Dispatch an action and get the run ID
Expand All @@ -573,4 +573,4 @@ jobs:
workflow_timeout_seconds: 300 # was 120 Default: 300

- name: Release
run: echo "https://github.com/luxonis/depthai-core-hil-tests/actions/runs/${{steps.return_dispatch.outputs.run_id}}" >> $GITHUB_STEP_SUMMARY
run: echo "https://github.com/luxonis/depthai-core-hil-tests/actions/runs/${{steps.return_dispatch.outputs.run_id}}" >> $GITHUB_STEP_SUMMARY
Empty file added depthai_cli/__init__.py
Empty file.
20 changes: 20 additions & 0 deletions depthai_cli/depthai.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
def cli():
    """Entry point for the ``depthai`` console script (see setup.py entry_points).

    Parses the command line and either prints the installed depthai version,
    lists the currently connected devices, or — when invoked with no
    arguments — prints the usage help.
    """
    # Imports are function-local so the console script starts fast and the
    # heavy depthai import only happens when the CLI is actually invoked.
    import argparse
    import sys

    import depthai as dai

    parser = argparse.ArgumentParser(description="DepthAI CLI")
    parser.add_argument("-v", "--version", action="store_true", help="Print version and exit.")
    parser.add_argument("-l", "--list-devices", action="store_true", help="List connected devices.")
    args = parser.parse_args()

    # Guard-clause dispatch: first matching flag wins, mirroring the
    # if/elif chain semantics (--version takes precedence over --list-devices).
    if args.version:
        print(dai.__version__)
        return
    if args.list_devices:
        print(dai.Device.getAllConnectedDevices())
        return
    if len(sys.argv) == 1:
        # Invoked bare with no flags at all: show usage instead of doing nothing.
        parser.print_help()


if __name__ == "__main__":
    cli()
107 changes: 107 additions & 0 deletions examples/Camera/thermal_cam.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,107 @@
import depthai as dai
import cv2
import numpy as np

mouseX, mouseY = 0, 0


def onMouse(event, x, y, *args):
    """cv2 mouse callback: remember the most recent cursor position.

    Stores the (x, y) window coordinates into the module-level
    ``mouseX``/``mouseY`` globals; ``event`` and the trailing cv2
    callback arguments are ignored.
    """
    global mouseX, mouseY
    mouseX, mouseY = x, y


device = dai.Device()
pipeline = dai.Pipeline()

# Thermal camera: scan the device's camera features for the first socket
# that reports THERMAL support; fail loudly if none is present.
thermalCam = pipeline.create(dai.node.Camera)
width, height = -1, -1
thermalFound = False
for features in device.getConnectedCameraFeatures():
    if dai.CameraSensorType.THERMAL in features.supportedTypes:
        thermalFound = True
        thermalCam.setBoardSocket(features.socket)
        width, height = features.width, features.height
        break
if not thermalFound:
    raise RuntimeError("No thermal camera found!")
thermalCam.setPreviewSize(width, height)

# Output raw: FP16 temperature data (degrees Celsius)
xoutRaw = pipeline.create(dai.node.XLinkOut)
xoutRaw.setStreamName("thermal_raw")
thermalCam.raw.link(xoutRaw.input)

# Output preview / video / isp: RGB or NV12 or YUV420 thermal image.
xoutImage = pipeline.create(dai.node.XLinkOut)
xoutImage.setStreamName("image")
thermalCam.preview.link(xoutImage.input)
device.startPipeline(pipeline)

# Small (2-deep), non-blocking host queues: we only ever want the latest frames.
qRaw = device.getOutputQueue("thermal_raw", 2, False)
qImage = device.getOutputQueue("image", 2, False)


RAW_WINDOW_NAME = "temperature"
IMAGE_WINDOW_NAME = "image"
# Scale 4x and position one next to another
cv2.namedWindow(RAW_WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.namedWindow(IMAGE_WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.moveWindow(RAW_WINDOW_NAME, 0, 0)
cv2.resizeWindow(RAW_WINDOW_NAME, width * 4, height * 4)
cv2.moveWindow(IMAGE_WINDOW_NAME, width * 4, 0)
cv2.resizeWindow(IMAGE_WINDOW_NAME, width * 4, height * 4)
# Both windows share the same onMouse callback, so the crosshair/readout
# follows the cursor in whichever window it is over.
cv2.setMouseCallback(RAW_WINDOW_NAME, onMouse)
cv2.setMouseCallback(IMAGE_WINDOW_NAME, onMouse)

while True:
    # Blocking gets: loop runs at the camera's frame rate.
    inRaw = qRaw.get()
    inImg = qImage.get()

    # Retrieve one point of fp16 data (upcast to float32 for OpenCV ops).
    frame = inRaw.getCvFrame().astype(np.float32)
    # Min-max normalize to 8-bit, then apply a MAGMA colormap for display.
    colormappedFrame = cv2.normalize(frame, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
    colormappedFrame = cv2.applyColorMap(colormappedFrame, cv2.COLORMAP_MAGMA)
    # Clamp the last-seen mouse position into the frame bounds so the
    # temperature lookup below can never index out of range.
    if (
        mouseX < 0
        or mouseY < 0
        or mouseX >= frame.shape[1]
        or mouseY >= frame.shape[0]
    ):
        mouseX = max(0, min(mouseX, frame.shape[1] - 1))
        mouseY = max(0, min(mouseY, frame.shape[0] - 1))
    textColor = (255, 255, 255)
    # Draw crosshair
    cv2.line(
        colormappedFrame,
        (mouseX - 10, mouseY),
        (mouseX + 10, mouseY),
        textColor,
        1,
    )
    cv2.line(
        colormappedFrame,
        (mouseX, mouseY - 10),
        (mouseX, mouseY + 10),
        textColor,
        1,
    )
    # Draw deg C readout next to the cursor; flip the label to the left of
    # the cursor when it is in the right half so the text stays on screen.
    text = f"{frame[mouseY, mouseX]:.2f} deg C"
    putTextLeft = mouseX > colormappedFrame.shape[1] / 2
    cv2.putText(
        colormappedFrame,
        text,
        (mouseX - 100 if putTextLeft else mouseX + 10, mouseY - 10),
        cv2.FONT_HERSHEY_SIMPLEX,
        0.5,
        textColor,
        1,
    )
    cv2.imshow(RAW_WINDOW_NAME, colormappedFrame)

    cv2.imshow(IMAGE_WINDOW_NAME, inImg.getCvFrame())

    # Quit on 'q'.
    if cv2.waitKey(1) == ord("q"):
        break
6 changes: 6 additions & 0 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -209,6 +209,7 @@ def build_extension(self, ext):
cmdclass={
'build_ext': CMakeBuild
},
packages=["depthai_cli"],
zip_safe=False,
classifiers=[
"Development Status :: 4 - Beta",
Expand All @@ -234,4 +235,9 @@ def build_extension(self, ext):
"Topic :: Software Development",
],
python_requires='>=3.6',
entry_points={
"console_scripts": [
'depthai=depthai_cli.depthai:cli'
]
}
)
2 changes: 2 additions & 0 deletions src/DeviceBindings.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -614,6 +614,8 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){
.def("getConnectionInterfaces", [](DeviceBase& d) { py::gil_scoped_release release; return d.getConnectionInterfaces(); }, DOC(dai, DeviceBase, getConnectionInterfaces))
.def("getConnectedCameraFeatures", [](DeviceBase& d) { py::gil_scoped_release release; return d.getConnectedCameraFeatures(); }, DOC(dai, DeviceBase, getConnectedCameraFeatures))
.def("getCameraSensorNames", [](DeviceBase& d) { py::gil_scoped_release release; return d.getCameraSensorNames(); }, DOC(dai, DeviceBase, getCameraSensorNames))
.def("getStereoPairs", [](DeviceBase& d) { py::gil_scoped_release release; return d.getStereoPairs(); }, DOC(dai, DeviceBase, getStereoPairs))
.def("getAvailableStereoPairs", [](DeviceBase& d) { py::gil_scoped_release release; return d.getAvailableStereoPairs(); }, DOC(dai, DeviceBase, getAvailableStereoPairs))
.def("getConnectedIMU", [](DeviceBase& d) { py::gil_scoped_release release; return d.getConnectedIMU(); }, DOC(dai, DeviceBase, getConnectedIMU))
.def("getIMUFirmwareVersion", [](DeviceBase& d) { py::gil_scoped_release release; return d.getIMUFirmwareVersion(); }, DOC(dai, DeviceBase, getIMUFirmwareVersion))
.def("getEmbeddedIMUFirmwareVersion", [](DeviceBase& d) { py::gil_scoped_release release; return d.getEmbeddedIMUFirmwareVersion(); }, DOC(dai, DeviceBase, getEmbeddedIMUFirmwareVersion))
Expand Down
16 changes: 16 additions & 0 deletions src/pipeline/CommonBindings.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,13 +23,15 @@
#include "depthai-shared/common/DetectionParserOptions.hpp"
#include "depthai-shared/common/RotatedRect.hpp"
#include "depthai-shared/common/Rect.hpp"
#include "depthai-shared/common/StereoPair.hpp"
#include "depthai-shared/common/Colormap.hpp"
#include "depthai-shared/common/FrameEvent.hpp"
#include "depthai-shared/common/Interpolation.hpp"

// depthai
#include "depthai/common/CameraFeatures.hpp"
#include "depthai/common/CameraExposureOffset.hpp"
#include "depthai/common/StereoPair.hpp"
#include "depthai/utility/ProfilingData.hpp"

void CommonBindings::bind(pybind11::module& m, void* pCallstack){
Expand Down Expand Up @@ -61,6 +63,7 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){
py::class_<DetectionParserOptions> detectionParserOptions(m, "DetectionParserOptions", DOC(dai, DetectionParserOptions));
py::class_<RotatedRect> rotatedRect(m, "RotatedRect", DOC(dai, RotatedRect));
py::class_<Rect> rect(m, "Rect", DOC(dai, Rect));
py::class_<StereoPair> stereoPair(m, "StereoPair", DOC(dai, StereoPair));
py::enum_<CameraExposureOffset> cameraExposureOffset(m, "CameraExposureOffset");
py::enum_<Colormap> colormap(m, "Colormap", DOC(dai, Colormap));
py::enum_<FrameEvent> frameEvent(m, "FrameEvent", DOC(dai, FrameEvent));
Expand Down Expand Up @@ -108,6 +111,19 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){
.def_readwrite("height", &Rect::height)
;

stereoPair
.def(py::init<>())
.def_readwrite("left", &StereoPair::left)
.def_readwrite("right", &StereoPair::right)
.def_readwrite("baseline", &StereoPair::baseline)
.def_readwrite("isVertical", &StereoPair::isVertical)
.def("__repr__", [](StereoPair& stereoPair) {
std::stringstream stream;
stream << stereoPair;
return stream.str();
})
;

timestamp
.def(py::init<>())
.def_readwrite("sec", &Timestamp::sec)
Expand Down
3 changes: 3 additions & 0 deletions src/pipeline/datatype/CameraControlBindings.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -177,6 +177,7 @@ std::vector<const char *> camCtrlAttr;
.def_readwrite("cmdMask", &RawCameraControl::cmdMask)
.def_readwrite("autoFocusMode", &RawCameraControl::autoFocusMode)
.def_readwrite("lensPosition", &RawCameraControl::lensPosition)
.def_readwrite("lensPositionRaw", &RawCameraControl::lensPositionRaw)
.def_readwrite("expManual", &RawCameraControl::expManual)
.def_readwrite("afRegion", &RawCameraControl::afRegion)
.def_readwrite("awbMode", &RawCameraControl::awbMode)
Expand Down Expand Up @@ -218,6 +219,7 @@ std::vector<const char *> camCtrlAttr;
.def("setAutoFocusLensRange", &CameraControl::setAutoFocusLensRange, py::arg("infinityPosition"), py::arg("macroPosition"), DOC(dai, CameraControl, setAutoFocusLensRange))
.def("setAutoFocusRegion", &CameraControl::setAutoFocusRegion, py::arg("startX"), py::arg("startY"), py::arg("width"), py::arg("height"), DOC(dai, CameraControl, setAutoFocusRegion))
.def("setManualFocus", &CameraControl::setManualFocus, py::arg("lensPosition"), DOC(dai, CameraControl, setManualFocus))
.def("setManualFocusRaw", &CameraControl::setManualFocusRaw, py::arg("lensPositionRaw"), DOC(dai, CameraControl, setManualFocusRaw))
.def("setAutoExposureEnable", &CameraControl::setAutoExposureEnable, DOC(dai, CameraControl, setAutoExposureEnable))
.def("setAutoExposureLock", &CameraControl::setAutoExposureLock, py::arg("lock"), DOC(dai, CameraControl, setAutoExposureLock))
.def("setAutoExposureRegion", &CameraControl::setAutoExposureRegion, py::arg("startX"), py::arg("startY"), py::arg("width"), py::arg("height"), DOC(dai, CameraControl, setAutoExposureRegion))
Expand Down Expand Up @@ -246,6 +248,7 @@ std::vector<const char *> camCtrlAttr;
.def("getExposureTime", &CameraControl::getExposureTime, DOC(dai, CameraControl, getExposureTime))
.def("getSensitivity", &CameraControl::getSensitivity, DOC(dai, CameraControl, getSensitivity))
.def("getLensPosition", &CameraControl::getLensPosition, DOC(dai, CameraControl, getLensPosition))
.def("getLensPositionRaw", &CameraControl::getLensPositionRaw, DOC(dai, CameraControl, getLensPositionRaw))
.def("get", &CameraControl::get, DOC(dai, CameraControl, get))
;
// Add also enum attributes from RawCameraControl
Expand Down
2 changes: 2 additions & 0 deletions src/pipeline/datatype/EncodedFrameBindings.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -99,6 +99,8 @@ void bind_encodedframe(pybind11::module &m, void *pCallstack) {
DOC(dai, EncodedFrame, getColorTemperature))
.def("getLensPosition", &EncodedFrame::getLensPosition,
DOC(dai, EncodedFrame, getLensPosition))
.def("getLensPositionRaw", &EncodedFrame::getLensPositionRaw,
DOC(dai, EncodedFrame, getLensPositionRaw))
.def("getQuality", &EncodedFrame::getQuality,
DOC(dai, EncodedFrame, getQuality))
.def("getBitrate", &EncodedFrame::getBitrate,
Expand Down
1 change: 1 addition & 0 deletions src/pipeline/datatype/ImgFrameBindings.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -131,6 +131,7 @@ void bind_imgframe(pybind11::module& m, void* pCallstack){
.def("getSensitivity", &ImgFrame::getSensitivity, DOC(dai, ImgFrame, getSensitivity))
.def("getColorTemperature", &ImgFrame::getColorTemperature, DOC(dai, ImgFrame, getColorTemperature))
.def("getLensPosition", &ImgFrame::getLensPosition, DOC(dai, ImgFrame, getLensPosition))
.def("getLensPositionRaw", &ImgFrame::getLensPositionRaw, DOC(dai, ImgFrame, getLensPositionRaw))

// OpenCV Support section
.def("setFrame", [](dai::ImgFrame& frm, py::array arr){
Expand Down
1 change: 1 addition & 0 deletions src/pipeline/node/ToFBindings.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ void bind_tof(pybind11::module& m, void* pCallstack){
.def_readonly("input", &ToF::input, DOC(dai, node, ToF, input), DOC(dai, node, ToF, input))
.def_readonly("depth", &ToF::depth, DOC(dai, node, ToF, depth), DOC(dai, node, ToF, depth))
.def_readonly("amplitude", &ToF::amplitude, DOC(dai, node, ToF, amplitude), DOC(dai, node, ToF, amplitude))
.def_readonly("intensity", &ToF::intensity, DOC(dai, node, ToF, intensity), DOC(dai, node, ToF, intensity))
.def_readonly("error", &ToF::error, DOC(dai, node, ToF, error), DOC(dai, node, ToF, error))
.def_readonly("initialConfig", &ToF::initialConfig, DOC(dai, node, ToF, initialConfig), DOC(dai, node, ToF, initialConfig))
;
Expand Down
45 changes: 32 additions & 13 deletions utilities/cam_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,20 +53,21 @@

def socket_type_pair(arg):
socket, type = arg.split(',')
if not (socket in ['rgb', 'left', 'right', 'cama', 'camb', 'camc', 'camd']):
if not (socket in ['rgb', 'left', 'right', 'cama', 'camb', 'camc', 'camd', 'came']):
raise ValueError("")
if not (type in ['m', 'mono', 'c', 'color', 't', 'tof']):
if not (type in ['m', 'mono', 'c', 'color', 't', 'tof', 'th', 'thermal']):
raise ValueError("")
is_color = True if type in ['c', 'color'] else False
is_tof = True if type in ['t', 'tof'] else False
return [socket, is_color, is_tof]
is_thermal = True if type in ['th', 'thermal'] else False
return [socket, is_color, is_tof, is_thermal]


parser = argparse.ArgumentParser()
parser.add_argument('-cams', '--cameras', type=socket_type_pair, nargs='+',
default=[['rgb', True, False], ['left', False, False],
['right', False, False], ['camd', True, False]],
help="Which camera sockets to enable, and type: c[olor] / m[ono] / t[of]. "
default=[['rgb', True, False, False], ['left', False, False, False],
['right', False, False, False], ['camd', True, False, False]],
help="Which camera sockets to enable, and type: c[olor] / m[ono] / t[of] / th[ermal]."
"E.g: -cams rgb,m right,c . Default: rgb,c left,m right,m camd,c")
parser.add_argument('-mres', '--mono-resolution', type=int, default=800, choices={480, 400, 720, 800},
help="Select mono camera resolution (height). Default: %(default)s")
Expand Down Expand Up @@ -122,12 +123,14 @@ def socket_type_pair(arg):
cam_list = []
cam_type_color = {}
cam_type_tof = {}
cam_type_thermal = {}
print("Enabled cameras:")
for socket, is_color, is_tof in args.cameras:
for socket, is_color, is_tof, is_thermal in args.cameras:
cam_list.append(socket)
cam_type_color[socket] = is_color
cam_type_tof[socket] = is_tof
print(socket.rjust(7), ':', 'tof' if is_tof else 'color' if is_color else 'mono')
cam_type_thermal[socket] = is_thermal
print(socket.rjust(7), ':', 'tof' if is_tof else 'color' if is_color else 'thermal' if is_thermal else 'mono')

print("DepthAI version:", dai.__version__)
print("DepthAI path:", dai.__file__)
Expand All @@ -140,6 +143,7 @@ def socket_type_pair(arg):
'camb' : dai.CameraBoardSocket.CAM_B,
'camc' : dai.CameraBoardSocket.CAM_C,
'camd' : dai.CameraBoardSocket.CAM_D,
'came' : dai.CameraBoardSocket.CAM_E,
}

rotate = {
Expand All @@ -150,6 +154,7 @@ def socket_type_pair(arg):
'camb': args.rotate in ['all', 'mono'],
'camc': args.rotate in ['all', 'mono'],
'camd': args.rotate in ['all', 'rgb'],
'came': args.rotate in ['all', 'mono'],
}

mono_res_opts = {
Expand Down Expand Up @@ -263,6 +268,16 @@ def prev(self):
xout_tof_amp[c].setStreamName(amp_name)
streams.append(amp_name)
tof[c].amplitude.link(xout_tof_amp[c].input)
elif cam_type_thermal[c]:
cam[c] = pipeline.create(dai.node.Camera)
cam[c].setBoardSocket(cam_socket_opts[c])
cam[c].setPreviewSize(256, 192)
cam[c].raw.link(xout[c].input)
xout_preview = pipeline.create(dai.node.XLinkOut)
xout_preview.setStreamName('preview_' + c)
cam[c].preview.link(xout_preview.input)
streams.append('preview_' + c)

elif cam_type_color[c]:
cam[c] = pipeline.createColorCamera()
cam[c].setResolution(color_res_opts[args.color_resolution])
Expand Down Expand Up @@ -354,16 +369,16 @@ def exit_cleanly(signum, frame):
# Manual exposure/focus set step
EXP_STEP = 500 # us
ISO_STEP = 50
LENS_STEP = 3
LENS_STEP = 1 / 1024
DOT_STEP = 0.05
FLOOD_STEP = 0.05
DOT_MAX = 1
FLOOD_MAX = 1

# Defaults and limits for manual focus/exposure controls
lensPos = 150
lensMin = 0
lensMax = 255
lensPos = 0.59
lensMin = 0.0
lensMax = 1.0

expTime = 20000
expMin = 1
Expand Down Expand Up @@ -423,6 +438,10 @@ def exit_cleanly(signum, frame):
frame = (frame.view(np.int16).astype(float))
frame = cv2.normalize(frame, frame, alpha=255, beta=0, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
frame = cv2.applyColorMap(frame, jet_custom)
elif cam_type_thermal[cam_skt] and c.startswith('cam'):
frame = frame.astype(np.float32)
frame = cv2.normalize(frame, frame, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
frame = cv2.applyColorMap(frame, cv2.COLORMAP_MAGMA)
if show:
txt = f"[{c:5}, {pkt.getSequenceNum():4}, {pkt.getTimestamp().total_seconds():.6f}] "
txt += f"Exp: {pkt.getExposureTime().total_seconds()*1000:6.3f} ms, "
Expand Down Expand Up @@ -524,7 +543,7 @@ def exit_cleanly(signum, frame):
lensPos = clamp(lensPos, lensMin, lensMax)
print("Setting manual focus, lens position: ", lensPos)
ctrl = dai.CameraControl()
ctrl.setManualFocus(lensPos)
ctrl.setManualFocusRaw(lensPos)
controlQueue.send(ctrl)
elif key in [ord('i'), ord('o'), ord('k'), ord('l')]:
if key == ord('i'):
Expand Down