跳到主要内容Ubuntu 22.04 与 ROS2 Humble 下 Intel RealSense D435i 相机配置指南 | 极客日志C++AI算法
Ubuntu 22.04 与 ROS2 Humble 下 Intel RealSense D435i 相机配置指南
本文介绍了在 Ubuntu 22.04 和 ROS2 Humble 环境下配置 Intel RealSense D435i 相机的完整流程。内容包括安装 RealSense SDK 及 ROS2 驱动(二进制或源码),创建自定义 ROS2 功能包,编写并发布订阅图像、深度及 IMU 数据的 C++ 节点,配置启动文件以集成 RViz 可视化,以及常用命令和故障排除方法。通过该指南可实现相机的数据采集与基础处理。
DataScient0 浏览 Ubuntu 22.04/ROS2 Humble 下使用 Intel RealSense D435i 相机
1. 安装 RealSense SDK 和 ROS2 驱动
1.1 安装依赖
sudo apt update
sudo apt install -y git cmake libssl-dev libusb-1.0-0-dev pkg-config libgtk-3-dev
sudo apt install -y libglfw3-dev libgl1-mesa-dev libglu1-mesa-dev
sudo apt install -y python3-dev python3-pip
1.2 安装 Intel RealSense SDK
方法一:二进制安装(推荐)
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE || sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-key F6E65AC044F831AC80A06380C8B3A55A6F3EFCDE
sudo add-apt-repository "deb https://librealsense.intel.com/Debian/apt-repo $(lsb_release -cs) main" -u
sudo apt-get install librealsense2-dkms librealsense2-utils librealsense2-dev librealsense2-dbg
方法二:源码编译安装(不推荐)
mkdir -p ~/realsense_ws
cd ~/realsense_ws
git clone https://github.com/IntelRealSense/librealsense.git
cd librealsense
./scripts/setup_udev_rules.sh
./scripts/patch-realsense-ubuntu-lts.sh
mkdir build && cd build
cmake .. -DCMAKE_BUILD_TYPE=Release -DBUILD_EXAMPLES=true
make -j$(nproc)
sudo make install
1.3 安装 ROS2 RealSense 包
方法一:二进制安装(推荐)
sudo apt install "ros-humble-realsense2-*"
方法二:源码编译安装(不推荐)
mkdir -p ~/ros2_ws/src
cd ~/ros2_ws/src
git clone https://github.com/IntelRealSense/realsense-ros.git -b humble
cd ~/ros2_ws
rosdep install -i --from-path src --rosdistro humble -y
colcon build
source install/setup.bash
2. 创建自定义 ROS2 包
2.1 创建包
cd ~/ros2_ws/src
ros2 pkg create d435i_camera --build-type ament_cmake --dependencies rclcpp sensor_msgs cv_bridge image_transport geometry_msgs tf2 tf2_ros
2.2 项目结构
d435i_camera/
├── CMakeLists.txt
├── package.xml
├── include/
├── src/
│ ├── d435i_publisher.cpp
│ └── d435i_subscriber.cpp
└── launch/
├── d435i_launch.py
└── rviz_config.rviz
3. 配置文件
3.1 package.xml
<?xml version="1.0"?>
<?xml-model href="http://download.ros.org/schema/package_format3.xsd" schematypens="http://www.w3.org/2001/XMLSchema"?>
<!-- Package manifest (format 3) for the d435i_camera demo package. -->
<package format="3">
<name>d435i_camera</name>
<version>0.1.0</version>
<description>D435i Camera ROS2 Package</description>
<maintainer email="[email protected]">Your Name</maintainer>
<license>Apache-2.0</license>
<!-- Build- and run-time dependencies used by the publisher/subscriber nodes. -->
<depend>rclcpp</depend>
<depend>sensor_msgs</depend>
<depend>cv_bridge</depend>
<depend>image_transport</depend>
<depend>geometry_msgs</depend>
<depend>tf2</depend>
<depend>tf2_ros</depend>
<!-- Driver wrapper started by the launch file. -->
<depend>realsense2_camera</depend>
<depend>vision_msgs</depend>
<depend>std_msgs</depend>
<!-- Run-time only: visualization and the `ros2 launch` command. -->
<exec_depend>rviz2</exec_depend>
<exec_depend>ros2launch</exec_depend>
<export>
<build_type>ament_cmake</build_type>
</export>
</package>
3.2 CMakeLists.txt
cmake_minimum_required(VERSION 3.8)
project(d435i_camera)
# Enable the usual warning set on GCC/Clang.
if(CMAKE_COMPILER_IS_GNUCXX OR CMAKE_CXX_COMPILER_ID MATCHES "Clang")
add_compile_options(-Wall -Wextra -Wpedantic)
endif()
# Find build dependencies.
find_package(ament_cmake REQUIRED)
find_package(rclcpp REQUIRED)
find_package(sensor_msgs REQUIRED)
find_package(cv_bridge REQUIRED)
find_package(image_transport REQUIRED)
find_package(geometry_msgs REQUIRED)
find_package(tf2 REQUIRED)
find_package(tf2_ros REQUIRED)
find_package(vision_msgs REQUIRED)
find_package(std_msgs REQUIRED)
find_package(OpenCV REQUIRED)
# Executables: synthetic-data publisher and image/IMU subscriber.
add_executable(d435i_publisher src/d435i_publisher.cpp)
ament_target_dependencies(d435i_publisher rclcpp sensor_msgs cv_bridge image_transport geometry_msgs tf2 tf2_ros vision_msgs std_msgs)
add_executable(d435i_subscriber src/d435i_subscriber.cpp)
ament_target_dependencies(d435i_subscriber rclcpp sensor_msgs cv_bridge std_msgs)
# Install node binaries where `ros2 run` finds them.
install(TARGETS d435i_publisher d435i_subscriber DESTINATION lib/${PROJECT_NAME})
# Install launch files.
install(DIRECTORY launch DESTINATION share/${PROJECT_NAME})
# Install the RViz configuration referenced by the launch file.
install(FILES launch/rviz_config.rviz DESTINATION share/${PROJECT_NAME}/launch)
ament_package()
4. 源代码文件
4.1 src/d435i_publisher.cpp
#include<rclcpp/rclcpp.hpp>
#include<sensor_msgs/msg/image.hpp>
#include<sensor_msgs/msg/camera_info.hpp>
#include<sensor_msgs/msg/imu.hpp>
#include<cv_bridge/cv_bridge.h>
#include<image_transport/image_transport.hpp>
#include<opencv2/opencv.hpp>
#include<tf2_ros/transform_broadcaster.h>
#include<geometry_msgs/msg/transform_stamped.hpp>
#include<vision_msgs/msg/detection2_d_array.hpp>
class D435iPublisher : public rclcpp::Node {
public:
  /// Publishes synthetic D435i-style data streams (color image, depth image,
  /// IMU, camera info, TF, and 2D detections) at roughly 30 Hz so that
  /// downstream nodes can be exercised without real hardware attached.
  D435iPublisher() : Node("d435i_publisher") {
    color_pub_ = image_transport::create_publisher(this, "camera/color/image_raw");
    depth_pub_ = image_transport::create_publisher(this, "camera/depth/image_raw");
    imu_pub_ = create_publisher<sensor_msgs::msg::Imu>("camera/imu", 10);
    color_info_pub_ = create_publisher<sensor_msgs::msg::CameraInfo>("camera/color/camera_info", 10);
    depth_info_pub_ = create_publisher<sensor_msgs::msg::CameraInfo>("camera/depth/camera_info", 10);
    tf_broadcaster_ = std::make_unique<tf2_ros::TransformBroadcaster>(this);
    // 33 ms period ~= 30 fps, matching the 640x480x30 profiles used elsewhere.
    timer_ = create_wall_timer(
        std::chrono::milliseconds(33),
        std::bind(&D435iPublisher::timer_callback, this));
    detection_pub_ = create_publisher<vision_msgs::msg::Detection2DArray>("detections", 10);
    RCLCPP_INFO(this->get_logger(), "D435i Publisher started");
  }

private:
  // Fires every timer tick and publishes one sample of every stream.
  void timer_callback() {
    publish_color_image();
    publish_depth_image();
    publish_imu_data();
    publish_camera_info();
    publish_tf();
    publish_detections();
  }

  // Publishes a synthetic 640x480 BGR frame with a circle and rectangle drawn on it.
  void publish_color_image() {
    cv::Mat color_image(480, 640, CV_8UC3, cv::Scalar(100, 100, 255));
    cv::circle(color_image, cv::Point(320, 240), 50, cv::Scalar(0, 255, 0), 3);
    cv::rectangle(color_image, cv::Rect(100, 100, 200, 200), cv::Scalar(255, 0, 0), 2);
    auto msg = cv_bridge::CvImage(std_msgs::msg::Header(), "bgr8", color_image).toImageMsg();
    msg->header.stamp = now();
    msg->header.frame_id = "camera_color_optical_frame";
    color_pub_.publish(msg);
  }

  // Publishes a synthetic 16-bit depth frame; pixel values are a repeating
  // gradient in [1000, 1500) (interpreted as millimetres by the subscriber).
  void publish_depth_image() {
    cv::Mat depth_image(480, 640, CV_16UC1);
    for (int i = 0; i < depth_image.rows; i++) {
      for (int j = 0; j < depth_image.cols; j++) {
        int distance = 1000 + (i * 640 + j) % 500;
        depth_image.at<uint16_t>(i, j) = distance;
      }
    }
    auto msg = cv_bridge::CvImage(std_msgs::msg::Header(), "16UC1", depth_image).toImageMsg();
    msg->header.stamp = now();
    msg->header.frame_id = "camera_depth_optical_frame";
    depth_pub_.publish(msg);
  }

  // Publishes a constant IMU sample: gravity along +Z plus a small angular rate.
  void publish_imu_data() {
    auto imu_msg = std::make_unique<sensor_msgs::msg::Imu>();
    imu_msg->header.stamp = now();
    imu_msg->header.frame_id = "camera_imu_optical_frame";
    // This node does not estimate orientation. Per the sensor_msgs/Imu
    // convention, setting orientation_covariance[0] = -1 tells consumers
    // the orientation field is unavailable and must be ignored.
    imu_msg->orientation_covariance[0] = -1.0;
    imu_msg->linear_acceleration.x = 0.0;
    imu_msg->linear_acceleration.y = 0.0;
    imu_msg->linear_acceleration.z = 9.81;
    imu_msg->angular_velocity.x = 0.01;
    imu_msg->angular_velocity.y = 0.02;
    imu_msg->angular_velocity.z = 0.03;
    imu_pub_->publish(std::move(imu_msg));
  }

  // Publishes pinhole intrinsics (fx = fy = 615, principal point at the image
  // center) for both streams; the depth info reuses the color intrinsics with
  // only the frame_id changed.
  void publish_camera_info() {
    auto color_info = std::make_unique<sensor_msgs::msg::CameraInfo>();
    color_info->header.stamp = now();
    color_info->header.frame_id = "camera_color_optical_frame";
    color_info->height = 480;
    color_info->width = 640;
    color_info->distortion_model = "plumb_bob";
    color_info->d = {0.0, 0.0, 0.0, 0.0, 0.0};
    color_info->k = {615.0, 0.0, 320.0, 0.0, 615.0, 240.0, 0.0, 0.0, 1.0};
    color_info->r = {1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0};
    color_info->p = {615.0, 0.0, 320.0, 0.0, 0.0, 615.0, 240.0, 0.0, 0.0, 0.0, 1.0, 0.0};
    auto depth_info = std::make_unique<sensor_msgs::msg::CameraInfo>();
    *depth_info = *color_info;
    depth_info->header.frame_id = "camera_depth_optical_frame";
    color_info_pub_->publish(std::move(color_info));
    depth_info_pub_->publish(std::move(depth_info));
  }

  // Broadcasts an identity transform camera_link -> camera_color_optical_frame.
  // NOTE(review): with real hardware the driver publishes calibrated extrinsics;
  // this identity transform is only a placeholder for the synthetic setup.
  void publish_tf() {
    geometry_msgs::msg::TransformStamped transform;
    transform.header.stamp = now();
    transform.header.frame_id = "camera_link";
    transform.child_frame_id = "camera_color_optical_frame";
    transform.transform.translation.x = 0.0;
    transform.transform.translation.y = 0.0;
    transform.transform.translation.z = 0.0;
    transform.transform.rotation.x = 0.0;
    transform.transform.rotation.y = 0.0;
    transform.transform.rotation.z = 0.0;
    transform.transform.rotation.w = 1.0;
    tf_broadcaster_->sendTransform(transform);
  }

  // Publishes one dummy 100x100 detection box centered at (320, 240).
  void publish_detections() {
    auto detection_msg = std::make_unique<vision_msgs::msg::Detection2DArray>();
    detection_msg->header.stamp = now();
    detection_msg->header.frame_id = "camera_color_optical_frame";
    vision_msgs::msg::Detection2D detection;
    detection.bbox.center.position.x = 320.0;
    detection.bbox.center.position.y = 240.0;
    detection.bbox.size_x = 100.0;
    detection.bbox.size_y = 100.0;
    detection_msg->detections.push_back(detection);
    detection_pub_->publish(std::move(detection_msg));
  }

  image_transport::Publisher color_pub_;
  image_transport::Publisher depth_pub_;
  rclcpp::Publisher<sensor_msgs::msg::Imu>::SharedPtr imu_pub_;
  rclcpp::Publisher<sensor_msgs::msg::CameraInfo>::SharedPtr color_info_pub_;
  rclcpp::Publisher<sensor_msgs::msg::CameraInfo>::SharedPtr depth_info_pub_;
  rclcpp::Publisher<vision_msgs::msg::Detection2DArray>::SharedPtr detection_pub_;
  std::unique_ptr<tf2_ros::TransformBroadcaster> tf_broadcaster_;
  rclcpp::TimerBase::SharedPtr timer_;
};
// Entry point: spin the publisher node until shutdown (Ctrl-C).
int main(int argc, char* argv[]) {
  rclcpp::init(argc, argv);
  rclcpp::spin(std::make_shared<D435iPublisher>());
  rclcpp::shutdown();
  return 0;
}
4.2 src/d435i_subscriber.cpp
#include<rclcpp/rclcpp.hpp>
#include<sensor_msgs/msg/image.hpp>
#include<sensor_msgs/msg/imu.hpp>
#include<cv_bridge/cv_bridge.h>
#include<opencv2/opencv.hpp>
#include<memory>
class D435iSubscriber : public rclcpp::Node {
public:
  /// Subscribes to the camera's color, depth, and IMU topics, logs basic
  /// statistics, and (optionally) shows the images in OpenCV windows.
  D435iSubscriber() : Node("d435i_subscriber") {
    color_sub_ = create_subscription<sensor_msgs::msg::Image>("camera/color/image_raw", 10,
        std::bind(&D435iSubscriber::color_callback, this, std::placeholders::_1));
    depth_sub_ = create_subscription<sensor_msgs::msg::Image>("camera/depth/image_raw", 10,
        std::bind(&D435iSubscriber::depth_callback, this, std::placeholders::_1));
    imu_sub_ = create_subscription<sensor_msgs::msg::Imu>("camera/imu", 10,
        std::bind(&D435iSubscriber::imu_callback, this, std::placeholders::_1));
    RCLCPP_INFO(this->get_logger(), "D435i Subscriber started");
  }

private:
  // Converts the incoming color image to BGR, derives a grayscale copy,
  // and displays both when show_images_ is enabled.
  void color_callback(const sensor_msgs::msg::Image::SharedPtr msg) {
    try {
      cv_bridge::CvImagePtr cv_ptr = cv_bridge::toCvCopy(msg, "bgr8");
      cv::Mat image = cv_ptr->image;
      RCLCPP_INFO_ONCE(this->get_logger(), "Color Image: %dx%d, Encoding: %s", image.cols, image.rows, msg->encoding.c_str());
      cv::Mat gray_image;
      cv::cvtColor(image, gray_image, cv::COLOR_BGR2GRAY);
      if (show_images_) {
        cv::imshow("Color Image", image);
        cv::imshow("Gray Image", gray_image);
        cv::waitKey(1);
      }
    } catch (cv_bridge::Exception& e) {
      RCLCPP_ERROR(this->get_logger(), "cv_bridge exception: %s", e.what());
    }
  }

  // Logs the depth range and renders a JET-colormapped visualization.
  void depth_callback(const sensor_msgs::msg::Image::SharedPtr msg) {
    try {
      cv_bridge::CvImagePtr cv_ptr = cv_bridge::toCvCopy(msg);
      cv::Mat depth_image = cv_ptr->image;
      double min_val, max_val;
      cv::minMaxLoc(depth_image, &min_val, &max_val);
      RCLCPP_INFO_THROTTLE(this->get_logger(), *this->get_clock(), 1000, "Depth Image: Min=%.2fmm, Max=%.2fmm", min_val, max_val);
      // Guard max_val > 0: an all-zero depth frame (e.g. sensor warm-up)
      // would otherwise divide by zero in the convertTo scale factor.
      if (show_images_ && depth_image.type() == CV_16UC1 && max_val > 0.0) {
        cv::Mat depth_visual;
        depth_image.convertTo(depth_visual, CV_8U, 255.0 / max_val);
        cv::applyColorMap(depth_visual, depth_visual, cv::COLORMAP_JET);
        cv::imshow("Depth Image", depth_visual);
        cv::waitKey(1);
      }
    } catch (cv_bridge::Exception& e) {
      RCLCPP_ERROR(this->get_logger(), "cv_bridge exception: %s", e.what());
    }
  }

  // Logs linear acceleration and angular velocity, throttled to 1 Hz.
  void imu_callback(const sensor_msgs::msg::Imu::SharedPtr msg) {
    double accel_x = msg->linear_acceleration.x;
    double accel_y = msg->linear_acceleration.y;
    double accel_z = msg->linear_acceleration.z;
    double gyro_x = msg->angular_velocity.x;
    double gyro_y = msg->angular_velocity.y;
    double gyro_z = msg->angular_velocity.z;
    RCLCPP_INFO_THROTTLE(this->get_logger(), *this->get_clock(), 1000, "IMU - Accel: [%.3f, %.3f, %.3f], Gyro: [%.3f, %.3f, %.3f]", accel_x, accel_y, accel_z, gyro_x, gyro_y, gyro_z);
  }

  rclcpp::Subscription<sensor_msgs::msg::Image>::SharedPtr color_sub_;
  rclcpp::Subscription<sensor_msgs::msg::Image>::SharedPtr depth_sub_;
  rclcpp::Subscription<sensor_msgs::msg::Imu>::SharedPtr imu_sub_;
  // Toggle for the OpenCV display windows (requires a GUI session).
  bool show_images_ = true;
};
// Entry point: spin the subscriber node until shutdown (Ctrl-C).
int main(int argc, char* argv[]) {
  rclcpp::init(argc, argv);
  rclcpp::spin(std::make_shared<D435iSubscriber>());
  rclcpp::shutdown();
  return 0;
}
5. 启动文件
5.1 launch/d435i_launch.py
import os
from launch import LaunchDescription
from launch_ros.actions import Node
from ament_index_python.packages import get_package_share_directory
def generate_launch_description():
    """Start the RealSense driver, the demo publisher/subscriber nodes,
    a static camera_link TF, and RViz with the bundled configuration."""
    pkg_dir = get_package_share_directory('d435i_camera')

    # NOTE: the ROS2 wrapper shipped for Humble (realsense-ros 4.x) renamed
    # several parameters relative to the ROS1/legacy wrapper:
    #   align_depth       -> align_depth.enable
    #   enable_pointcloud -> pointcloud.enable
    #   enable_imu        -> enable_gyro / enable_accel (separate streams)
    # and unite_imu_method is an int: 0=none, 1=copy, 2=linear_interpolation.
    realsense_node = Node(
        package='realsense2_camera',
        executable='realsense2_camera_node',
        namespace='camera',
        name='realsense2_camera',
        parameters=[{
            'rgb_camera.profile': '640x480x30',
            'depth_module.profile': '640x480x30',
            'enable_color': True,
            'enable_depth': True,
            'enable_gyro': True,
            'enable_accel': True,
            'align_depth.enable': True,
            'pointcloud.enable': True,
            'initial_reset': True,
            'unite_imu_method': 2,
        }],
        output='screen'
    )

    # Synthetic-data publisher from this package (useful without hardware).
    publisher_node = Node(
        package='d435i_camera',
        executable='d435i_publisher',
        name='d435i_publisher',
        output='screen'
    )

    # Logs/visualizes the incoming color, depth, and IMU streams.
    subscriber_node = Node(
        package='d435i_camera',
        executable='d435i_subscriber',
        name='d435i_subscriber',
        output='screen'
    )

    # RViz preconfigured via the config installed to share/<pkg>/launch.
    rviz_config_file = os.path.join(pkg_dir, 'launch', 'rviz_config.rviz')
    rviz_node = Node(
        package='rviz2',
        executable='rviz2',
        name='rviz2',
        arguments=['-d', rviz_config_file],
        output='screen'
    )

    # Identity transform so frames resolve even before the driver publishes TF.
    static_tf_node = Node(
        package='tf2_ros',
        executable='static_transform_publisher',
        arguments=['0', '0', '0', '0', '0', '0', 'camera_link', 'camera_color_optical_frame']
    )

    return LaunchDescription([
        realsense_node,
        publisher_node,
        subscriber_node,
        static_tf_node,
        rviz_node
    ])
6. 编译和运行
6.1 编译包
cd ~/ros2_ws
colcon build --packages-select d435i_camera
source install/setup.bash
6.2 测试 RealSense 驱动
source ~/ros2_ws/install/setup.bash
ros2 launch realsense2_camera rs_launch.py
ros2 topic echo /camera/color/image_raw --once
6.3 运行自定义节点
方法 1:使用启动文件
source install/setup.bash
ros2 launch d435i_camera d435i_launch.py
方法 2:单独运行节点
source install/setup.bash
ros2 run d435i_camera d435i_subscriber
source install/setup.bash
ros2 run d435i_camera d435i_publisher
ros2 topic list
ros2 topic echo /camera/color/image_raw --once
ros2 topic echo /camera/imu --once
7. 常用命令和工具
7.1 查看相机信息
ros2 topic info /camera/color/image_raw
ros2 topic info /camera/depth/image_raw
ros2 topic echo /camera/imu
ros2 run tf2_tools view_frames
7.2 使用 rqt 工具
sudo apt install ros-humble-rqt*
8. 故障排除
8.1 常见问题解决
- 权限问题
sudo usermod -a -G dialout $USER
sudo usermod -a -G video $USER
- UDEV 规则问题
sudo cp ~/realsense_ws/librealsense/config/99-realsense-libusb.rules /etc/udev/rules.d/
sudo udevadm control --reload-rules && sudo udevadm trigger
- 重新插拔相机
sudo systemctl restart udev
- 检查相机连接
rs-enumerate-devices
- 测试深度流
realsense-viewer
8.2 验证安装
ros2 topic pub /test std_msgs/msg/String "data: 'Hello ROS2'" -1
9. 高级功能配置
9.1 修改发布频率
parameters=[{
'depth_fps': 30.0,
'color_fps': 30.0,
'gyro_fps': 200.0,
'accel_fps': 63.0,
}]
9.2 启用点云
parameters=[{
    # Humble 驱动(realsense-ros 4.x)中点云开关参数名为 pointcloud.enable;
    # 旧版的 pointcloud_texture_stream 参数已更名,纹理来源请参考驱动 README。
    'pointcloud.enable': True,
}]
微信扫一扫,关注极客日志
微信公众号「极客日志」,在微信中扫描左侧二维码关注。展示文案:极客日志 zeeklog
相关免费在线工具
- 加密/解密文本
使用加密算法(如AES、TripleDES、Rabbit或RC4)加密和解密文本明文。 在线工具,加密/解密文本在线工具,online
- RSA密钥对生成器
生成新的随机RSA私钥和公钥pem证书。 在线工具,RSA密钥对生成器在线工具,online
- Mermaid 预览与可视化编辑
基于 Mermaid.js 实时预览流程图、时序图等图表,支持源码编辑与即时渲染。 在线工具,Mermaid 预览与可视化编辑在线工具,online
- Base64 字符串编码/解码
将字符串编码和解码为其 Base64 格式表示形式即可。 在线工具,Base64 字符串编码/解码在线工具,online
- Base64 文件转换器
将字符串、文件或图像转换为其 Base64 表示形式。 在线工具,Base64 文件转换器在线工具,online
- Markdown 转 HTML
将 Markdown(GFM)转为 HTML 片段,浏览器内 marked 解析;与 HTML 转 Markdown 互为补充。 在线工具,Markdown 转 HTML在线工具,online