Resolve "Внедрить алгоритм оценки 6D-позы на основе Deep Object Pose Estimation (DOPE)" #187
49 changed files with 3293 additions and 128 deletions
@@ -67,6 +67,12 @@ install(TARGETS ${PROJECT_NAME}
  LIBRARY DESTINATION lib
)

install(PROGRAMS
  scripts/publish_asm_config.py
  scripts/test_tf.py
  DESTINATION lib/${PROJECT_NAME}
)

ament_export_targets(export_${PROJECT_NAME} HAS_LIBRARY_TARGET)
# ament_export_dependencies(${THIS_PACKAGE_INCLUDE_DEPENDS})
ament_package()

@@ -2,12 +2,13 @@
#include "rclcpp_lifecycle/lifecycle_node.hpp"
// #include "ros_gz_bridge/convert.hpp"
#include "env_interface/env_interface.hpp"
#include "geometry_msgs/msg/pose_array.hpp"
#include "gz/msgs/pose_v.pb.h"
#include <geometry_msgs/msg/transform_stamped.hpp>
#include "geometry_msgs/msg/pose_array.hpp"
#include <gz/transport/Node.hh>
#include <memory>
#include <mutex>
#include <rclcpp/timer.hpp>
#include <string>
#include <tf2_msgs/msg/detail/tf_message__struct.hpp>
#include <tf2_msgs/msg/tf_message.hpp>

@@ -16,6 +17,7 @@
#include <gz/sim/Entity.hh>
#include <gz/sim/EntityComponentManager.hh>
#include <gz/sim/components/Model.hh>
#include <thread>

namespace gz_enviroment {
using CallbackReturn =

@@ -37,7 +39,6 @@ protected:

private:
  std::unique_ptr<tf2_ros::TransformBroadcaster> m_tf2_broadcaster;
  std::unique_ptr<tf2_ros::TransformBroadcaster> m_tf2_broadcaster_target;
  std::shared_ptr<gz::transport::Node> m_gz_node;
  std::vector<std::string> m_follow_frames;
  std::string m_topic_name;

@@ -46,6 +47,9 @@ private:
  std::shared_ptr<rbs_utils::AssemblyConfigLoader> m_config_loader;
  tf2_msgs::msg::TFMessage m_transforms;
  tf2_msgs::msg::TFMessage m_target_places;
  rclcpp::TimerBase::SharedPtr m_timer;

  std::vector<geometry_msgs::msg::TransformStamped> m_all_transforms{};

  std::string getGzWorldName();
  std::string createGzModelString(const std::vector<double> &pose,

@@ -53,5 +57,14 @@ private:
                                  const double &mass,
                                  const std::string &mesh_filepath,
                                  const std::string &name);

  void broadcast_timer_callback() {
    if (!m_transforms.transforms.empty()) {
      for (auto &transform : m_transforms.transforms) {
        m_tf2_broadcaster->sendTransform(std::move(transform));
      }
      // m_transforms.transforms.clear();
    }
  }
};
} // namespace gz_enviroment
env_manager/gz_enviroment/scripts/publish_asm_config.py (new executable file, 114 lines)
@@ -0,0 +1,114 @@
#! /usr/bin/env python3
|
||||
|
||||
import yaml
|
||||
import rclpy
|
||||
from rclpy.node import Node
|
||||
from rbs_utils_interfaces.msg import AssemblyConfig, NamedPose, RelativeNamedPose
|
||||
from geometry_msgs.msg import Point
|
||||
import os
|
||||
|
||||
|
||||
class ConfigPublisherNode(Node):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__('config_publisher_node')
|
||||
self.publisher_ = self.create_publisher(AssemblyConfig, '/assembly_config', 10)
|
||||
timer_period = 1 # seconds
|
||||
self.timer = self.create_timer(timer_period, self.timer_callback)
|
||||
if 'RBS_ASSEMBLY_DIR' in os.environ:
|
||||
assembly_dir = os.environ['RBS_ASSEMBLY_DIR']
|
||||
else:
|
||||
assembly_dir = '~/assembly/robossembler-stuff'
|
||||
self.assembly_dir = os.path.expanduser(assembly_dir)
|
||||
datafolder = "asp-example"
|
||||
# Read data from YAML file
|
||||
with open(f"{self.assembly_dir}/{datafolder}/rbs_db.yaml") as f:
|
||||
self.data = yaml.safe_load(f)
|
||||
|
||||
def timer_callback(self):
|
||||
|
||||
# Create AssemblyConfig message
|
||||
msg = AssemblyConfig()
|
||||
# Fill in the data from the YAML file
|
||||
msg.assets_db = self.data['assets_db']
|
||||
|
||||
# Populate workspace
|
||||
for wp in self.data['workspace']:
|
||||
point = Point()
|
||||
point.x = wp["x"]
|
||||
point.y = wp["y"]
|
||||
point.z = wp["z"]
|
||||
msg.workspace.append(point)
|
||||
|
||||
# Populate absolute_part
|
||||
for part in self.data['absolute_part']:
|
||||
named_pose = NamedPose()
|
||||
named_pose.name = part['name']
|
||||
pose = part['pose']
|
||||
named_pose.pose.position.x = pose['position']['x']
|
||||
named_pose.pose.position.y = pose['position']['y']
|
||||
named_pose.pose.position.z = pose['position']['z']
|
||||
named_pose.pose.orientation.x = pose['orientation']['x']
|
||||
named_pose.pose.orientation.y = pose['orientation']['y']
|
||||
named_pose.pose.orientation.z = pose['orientation']['z']
|
||||
named_pose.pose.orientation.w = pose['orientation']['w']
|
||||
msg.absolute_part.append(named_pose)
|
||||
|
||||
# Populate relative_part
|
||||
for part in self.data['relative_part']:
|
||||
relative_named_pose = RelativeNamedPose()
|
||||
relative_named_pose.name = part['name']
|
||||
relative_named_pose.relative_at = part['relative_at']
|
||||
pose = part['pose']
|
||||
relative_named_pose.pose.position.x = pose['position']['x']
|
||||
relative_named_pose.pose.position.y = pose['position']['y']
|
||||
relative_named_pose.pose.position.z = pose['position']['z']
|
||||
relative_named_pose.pose.orientation.x = pose['orientation']['x']
|
||||
relative_named_pose.pose.orientation.y = pose['orientation']['y']
|
||||
relative_named_pose.pose.orientation.z = pose['orientation']['z']
|
||||
relative_named_pose.pose.orientation.w = pose['orientation']['w']
|
||||
msg.relative_part.append(relative_named_pose)
|
||||
|
||||
# Populate grasp_pose
|
||||
for part in self.data['grasp_pose']:
|
||||
relative_named_pose = RelativeNamedPose()
|
||||
relative_named_pose.name = part['name']
|
||||
relative_named_pose.relative_at = part['relative_at']
|
||||
pose = part['pose']
|
||||
relative_named_pose.pose.position.x = pose['position']['x']
|
||||
relative_named_pose.pose.position.y = pose['position']['y']
|
||||
relative_named_pose.pose.position.z = pose['position']['z']
|
||||
relative_named_pose.pose.orientation.x = pose['orientation']['x']
|
||||
relative_named_pose.pose.orientation.y = pose['orientation']['y']
|
||||
relative_named_pose.pose.orientation.z = pose['orientation']['z']
|
||||
relative_named_pose.pose.orientation.w = pose['orientation']['w']
|
||||
msg.grasp_pose.append(relative_named_pose)
|
||||
|
||||
# Populate extra_poses
|
||||
for part in self.data['extra_poses']:
|
||||
named_pose = NamedPose()
|
||||
named_pose.name = part['name']
|
||||
pose = part['pose']
|
||||
named_pose.pose.position.x = pose['position']['x']
|
||||
named_pose.pose.position.y = pose['position']['y']
|
||||
named_pose.pose.position.z = pose['position']['z']
|
||||
named_pose.pose.orientation.x = pose['orientation']['x']
|
||||
named_pose.pose.orientation.y = pose['orientation']['y']
|
||||
named_pose.pose.orientation.z = pose['orientation']['z']
|
||||
named_pose.pose.orientation.w = pose['orientation']['w']
|
||||
msg.extra_poses.append(named_pose)
|
||||
|
||||
# Publish the message
|
||||
self.publisher_.publish(msg)
|
||||
|
||||
|
||||
def main(args=None):
|
||||
rclpy.init(args=args)
|
||||
config_publisher_node = ConfigPublisherNode()
|
||||
rclpy.spin(config_publisher_node)
|
||||
config_publisher_node.destroy_node()
|
||||
rclpy.shutdown()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
env_manager/gz_enviroment/scripts/test_tf.py (new executable file, 75 lines)
@@ -0,0 +1,75 @@
#! /usr/bin/env python3
|
||||
|
||||
import rclpy
|
||||
from rclpy.node import Node
|
||||
from tf2_msgs.msg import TFMessage
|
||||
from geometry_msgs.msg import TransformStamped
|
||||
from rbs_utils_interfaces.msg import AssemblyConfig
|
||||
from tf2_ros.transform_broadcaster import TransformBroadcaster
|
||||
|
||||
|
||||
class TF2PublisherNode(Node):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__('tf2_publisher_node')
|
||||
self.tfb_ = TransformBroadcaster(self)
|
||||
self.subscription = self.create_subscription(
|
||||
AssemblyConfig, '/assembly_config', self.config_callback, 10)
|
||||
|
||||
def config_callback(self, msg):
|
||||
# Populate transforms for absolute_part
|
||||
for part in msg.absolute_part:
|
||||
ts = TransformStamped()
|
||||
ts.header.stamp = self.get_clock().now().to_msg()
|
||||
ts.header.frame_id = "world"
|
||||
ts.child_frame_id = part.name
|
||||
ts.transform.translation.x = part.pose.position.x
|
||||
ts.transform.translation.y = part.pose.position.y
|
||||
ts.transform.translation.z = part.pose.position.z
|
||||
ts.transform.rotation.x = part.pose.orientation.x
|
||||
ts.transform.rotation.y = part.pose.orientation.y
|
||||
ts.transform.rotation.z = part.pose.orientation.z
|
||||
ts.transform.rotation.w = part.pose.orientation.w
|
||||
self.tfb_.sendTransform(ts)
|
||||
|
||||
# Populate transforms for relative_part
|
||||
for part in msg.relative_part:
|
||||
ts = TransformStamped()
|
||||
ts.header.stamp = self.get_clock().now().to_msg()
|
||||
ts.header.frame_id = part.relative_at
|
||||
ts.child_frame_id = part.name
|
||||
ts.transform.translation.x = part.pose.position.x
|
||||
ts.transform.translation.y = part.pose.position.y
|
||||
ts.transform.translation.z = part.pose.position.z
|
||||
ts.transform.rotation.x = part.pose.orientation.x
|
||||
ts.transform.rotation.y = part.pose.orientation.y
|
||||
ts.transform.rotation.z = part.pose.orientation.z
|
||||
ts.transform.rotation.w = part.pose.orientation.w
|
||||
self.tfb_.sendTransform(ts)
|
||||
|
||||
# Populate transforms for grasp_pose
|
||||
for part in msg.grasp_pose:
|
||||
ts = TransformStamped()
|
||||
ts.header.stamp = self.get_clock().now().to_msg()
|
||||
ts.header.frame_id = part.relative_at
|
||||
ts.child_frame_id = part.name
|
||||
ts.transform.translation.x = part.pose.position.x
|
||||
ts.transform.translation.y = part.pose.position.y
|
||||
ts.transform.translation.z = part.pose.position.z
|
||||
ts.transform.rotation.x = part.pose.orientation.x
|
||||
ts.transform.rotation.y = part.pose.orientation.y
|
||||
ts.transform.rotation.z = part.pose.orientation.z
|
||||
ts.transform.rotation.w = part.pose.orientation.w
|
||||
self.tfb_.sendTransform(ts)
|
||||
|
||||
|
||||
def main(args=None):
|
||||
rclpy.init(args=args)
|
||||
tf2_publisher_node = TF2PublisherNode()
|
||||
rclpy.spin(tf2_publisher_node)
|
||||
tf2_publisher_node.destroy_node()
|
||||
rclpy.shutdown()
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
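Both new scripts above (publish_asm_config.py and test_tf.py) copy pose fields element by element. A small helper along the lines below could factor out the YAML-dict-to-message copying used in publish_asm_config.py; this is only a sketch, and the dict_to_pose name is hypothetical, not part of this changeset.

# Hypothetical helper (not part of this PR): convert a YAML pose dict of the
# form {"position": {...}, "orientation": {...}} into a geometry_msgs Pose.
from geometry_msgs.msg import Pose

def dict_to_pose(pose_dict):
    pose = Pose()
    pos = pose_dict["position"]
    ori = pose_dict["orientation"]
    pose.position.x, pose.position.y, pose.position.z = pos["x"], pos["y"], pos["z"]
    pose.orientation.x = ori["x"]
    pose.orientation.y = ori["y"]
    pose.orientation.z = ori["z"]
    pose.orientation.w = ori["w"]
    return pose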
@ -2,8 +2,13 @@
|
|||
#include "geometry_msgs/msg/pose_stamped.hpp"
|
||||
#include "rclcpp_lifecycle/node_interfaces/lifecycle_node_interface.hpp"
|
||||
#include "ros_gz_bridge/convert.hpp"
|
||||
#include <algorithm>
|
||||
#include <chrono>
|
||||
#include <functional>
|
||||
#include <iterator>
|
||||
#include <memory>
|
||||
#include <rclcpp/logging.hpp>
|
||||
#include <rclcpp/utilities.hpp>
|
||||
#include <rclcpp_lifecycle/lifecycle_node.hpp>
|
||||
#include <ros_gz_interfaces/msg/detail/entity__builder.hpp>
|
||||
|
||||
|
@ -30,11 +35,15 @@ CallbackReturn GzEnviroment::on_init() {
|
|||
"Model Name: " << modelName->Data());
|
||||
}
|
||||
}
|
||||
|
||||
// getNode()->declare_parameter("use_sim_time", true);
|
||||
|
||||
return CallbackReturn::SUCCESS;
|
||||
}
|
||||
|
||||
CallbackReturn GzEnviroment::on_configure(const rclcpp_lifecycle::State &) {
|
||||
// Configuration of parameters
|
||||
using namespace std::chrono_literals;
|
||||
RCLCPP_INFO(getNode()->get_logger(), "GzEnviroment is on_configure");
|
||||
m_gz_node = std::make_shared<gz::transport::Node>();
|
||||
m_world_name = getGzWorldName();
|
||||
|
@ -42,16 +51,9 @@ CallbackReturn GzEnviroment::on_configure(const rclcpp_lifecycle::State &) {
|
|||
m_service_spawn = std::string("/world/") + m_world_name + "/create";
|
||||
m_config_loader = std::make_shared<rbs_utils::AssemblyConfigLoader>(
|
||||
"asp-example", getNode());
|
||||
m_follow_frames = m_config_loader->getSceneModelNames();
|
||||
// m_target_places = std::make_shared<tf2_msgs::msg::TFMessage>();
|
||||
m_transforms = m_config_loader->getTfData("bishop");
|
||||
// TODO add workspace viewer in Rviz
|
||||
// m_config_loader->printWorkspace();
|
||||
|
||||
// if (!doGzSpawn())
|
||||
// {
|
||||
// return CallbackReturn::ERROR;
|
||||
// }
|
||||
m_follow_frames = m_config_loader->getUniqueSceneModelNames();
|
||||
m_transforms = m_config_loader->getGraspTfData("bishop");
|
||||
// m_transforms = m_config_loader->getAllPossibleTfData();
|
||||
|
||||
return CallbackReturn::SUCCESS;
|
||||
}
|
||||
|
@ -60,11 +62,10 @@ CallbackReturn GzEnviroment::on_activate(const rclcpp_lifecycle::State &) {
|
|||
// Setting up the subscribers and publishers
|
||||
RCLCPP_INFO(getNode()->get_logger(), "GzEnviroment is on_activate");
|
||||
|
||||
m_gz_node->Subscribe(m_topic_name, &GzEnviroment::onGzPoseSub, this);
|
||||
// Initialize tf broadcaster before use in subscriber
|
||||
m_tf2_broadcaster =
|
||||
std::make_unique<tf2_ros::TransformBroadcaster>(getNode());
|
||||
m_tf2_broadcaster_target =
|
||||
std::make_unique<tf2_ros::TransformBroadcaster>(getNode());
|
||||
m_gz_node->Subscribe(m_topic_name, &GzEnviroment::onGzPoseSub, this);
|
||||
|
||||
return CallbackReturn::SUCCESS;
|
||||
}
|
||||
|
@ -86,7 +87,6 @@ CallbackReturn GzEnviroment::on_cleanup(const rclcpp_lifecycle::State &) {
|
|||
|
||||
CallbackReturn GzEnviroment::on_deactivate(const rclcpp_lifecycle::State &) {
|
||||
RCLCPP_INFO(getNode()->get_logger(), "GzEnviroment is on_deactivate");
|
||||
// What should we use here?
|
||||
return CallbackReturn::SUCCESS;
|
||||
}
|
||||
|
||||
|
@ -120,12 +120,11 @@ void GzEnviroment::onGzPoseSub(const gz::msgs::Pose_V &pose_v) {
|
|||
}
|
||||
|
||||
for (auto &place : m_transforms.transforms) {
|
||||
place.header.stamp = getNode()->get_clock()->now();
|
||||
all_transforms.push_back(place);
|
||||
}
|
||||
|
||||
for (auto &transform : all_transforms) {
|
||||
m_tf2_broadcaster->sendTransform(transform);
|
||||
}
|
||||
// m_all_transforms.swap(all_transforms);
|
||||
}
|
||||
|
||||
std::string GzEnviroment::getGzWorldName() {
|
||||
|
|
rbs_bringup/config/roboclone.yaml (new file, 23 lines)
@@ -0,0 +1,23 @@
scene_config:
  - name: arm1
    type: rbs_arm
    pose:
      position:
        x: -0.45
        y: -2.0
        z: 1.6
      orientation:
        x: 3.14159
        y: 0.0
        z: 0.0
  - name: arm2
    type: rbs_arm
    pose:
      position:
        x: 0.45
        y: -2.0
        z: 1.6
      orientation:
        x: 3.14159
        y: 0.0
        z: 0.0
rbs_bringup/launch/launch_env.launch.py (new file, 254 lines)
@@ -0,0 +1,254 @@
from launch import LaunchDescription
|
||||
from launch.actions import (
|
||||
DeclareLaunchArgument,
|
||||
IncludeLaunchDescription,
|
||||
OpaqueFunction
|
||||
)
|
||||
from launch.conditions import IfCondition
|
||||
from launch.launch_description_sources import PythonLaunchDescriptionSource
|
||||
from launch.substitutions import LaunchConfiguration, PathJoinSubstitution
|
||||
from launch_ros.substitutions import FindPackageShare
|
||||
from launch_ros.actions import Node
|
||||
import os
|
||||
from ament_index_python.packages import get_package_share_directory
|
||||
from nav2_common.launch import RewrittenYaml
|
||||
|
||||
def launch_setup(context, *args, **kwargs):
|
||||
# Initialize Arguments
|
||||
robot_type = LaunchConfiguration("robot_type")
|
||||
# General arguments
|
||||
with_gripper_condition = LaunchConfiguration("with_gripper")
|
||||
controllers_file = LaunchConfiguration("controllers_file")
|
||||
cartesian_controllers = LaunchConfiguration("cartesian_controllers")
|
||||
description_package = LaunchConfiguration("description_package")
|
||||
description_file = LaunchConfiguration("description_file")
|
||||
robot_name = LaunchConfiguration("robot_name")
|
||||
start_joint_controller = LaunchConfiguration("start_joint_controller")
|
||||
initial_joint_controller = LaunchConfiguration("initial_joint_controller")
|
||||
launch_simulation = LaunchConfiguration("launch_sim")
|
||||
launch_moveit = LaunchConfiguration("launch_moveit")
|
||||
launch_task_planner = LaunchConfiguration("launch_task_planner")
|
||||
launch_perception = LaunchConfiguration("launch_perception")
|
||||
moveit_config_package = LaunchConfiguration("moveit_config_package")
|
||||
moveit_config_file = LaunchConfiguration("moveit_config_file")
|
||||
use_sim_time = LaunchConfiguration("use_sim_time")
|
||||
sim_gazebo = LaunchConfiguration("sim_gazebo")
|
||||
hardware = LaunchConfiguration("hardware")
|
||||
env_manager = LaunchConfiguration("env_manager")
|
||||
launch_controllers = LaunchConfiguration("launch_controllers")
|
||||
gazebo_gui = LaunchConfiguration("gazebo_gui")
|
||||
gripper_name = LaunchConfiguration("gripper_name")
|
||||
|
||||
|
||||
sim_gazebo = LaunchConfiguration("sim_gazebo")
|
||||
launch_simulation = LaunchConfiguration("launch_sim")
|
||||
|
||||
configured_params = RewrittenYaml(
|
||||
source_file=os.path.join(
|
||||
get_package_share_directory(
|
||||
description_package.perform(context)),
|
||||
"config",
|
||||
controllers_file.perform(context)),
|
||||
root_key=robot_name.perform(context),
|
||||
param_rewrites={},
|
||||
convert_types=True,
|
||||
)
|
||||
initial_joint_controllers_file_path = os.path.join(
|
||||
get_package_share_directory('rbs_arm'), 'config', 'rbs_arm0_controllers.yaml'
|
||||
)
|
||||
controller_paramfile = configured_params.perform(context)
|
||||
namespace = "/" + robot_name.perform(context)
|
||||
|
||||
# spawner = Node(
|
||||
# package="gz_enviroment_python",
|
||||
# executable="spawner.py",
|
||||
# namespace=namespace
|
||||
# )
|
||||
|
||||
single_robot_setup = IncludeLaunchDescription(
|
||||
PythonLaunchDescriptionSource([
|
||||
PathJoinSubstitution([
|
||||
FindPackageShare('rbs_bringup'),
|
||||
"launch",
|
||||
"rbs_robot.launch.py"
|
||||
])
|
||||
]),
|
||||
launch_arguments={
|
||||
"env_manager": env_manager,
|
||||
"with_gripper": with_gripper_condition,
|
||||
"gripper_name": gripper_name,
|
||||
"controllers_file": controllers_file,
|
||||
"robot_type": robot_type,
|
||||
"controllers_file": initial_joint_controllers_file_path,
|
||||
"cartesian_controllers": cartesian_controllers,
|
||||
"description_package": description_package,
|
||||
"description_file": description_file,
|
||||
"robot_name": robot_name,
|
||||
"start_joint_controller": start_joint_controller,
|
||||
"initial_joint_controller": initial_joint_controller,
|
||||
"launch_simulation": launch_simulation,
|
||||
"launch_moveit": launch_moveit,
|
||||
"launch_task_planner": launch_task_planner,
|
||||
"launch_perception": launch_perception,
|
||||
"moveit_config_package": moveit_config_package,
|
||||
"moveit_config_file": moveit_config_file,
|
||||
"use_sim_time": use_sim_time,
|
||||
"sim_gazebo": sim_gazebo,
|
||||
"hardware": hardware,
|
||||
"launch_controllers": "true",
|
||||
"gazebo_gui": gazebo_gui,
|
||||
}.items()
|
||||
)
|
||||
|
||||
nodes_to_start = [
|
||||
# spawner,
|
||||
single_robot_setup
|
||||
]
|
||||
return nodes_to_start
|
||||
|
||||
|
||||
def generate_launch_description():
|
||||
declared_arguments = []
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument(
|
||||
"robot_type",
|
||||
description="Type of robot by name",
|
||||
choices=["rbs_arm","ur3", "ur3e", "ur5", "ur5e", "ur10", "ur10e", "ur16e"],
|
||||
default_value="rbs_arm",
|
||||
)
|
||||
)
|
||||
# General arguments
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument(
|
||||
"controllers_file",
|
||||
default_value="rbs_arm_controllers_gazebosim.yaml",
|
||||
description="YAML file with the controllers configuration.",
|
||||
)
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument(
|
||||
"description_package",
|
||||
default_value="rbs_arm",
|
||||
description="Description package with robot URDF/XACRO files. Usually the argument \
is not set; setting it enables use of a custom description.",
|
||||
)
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument(
|
||||
"description_file",
|
||||
default_value="rbs_arm_modular.xacro",
|
||||
description="URDF/XACRO description file with the robot.",
|
||||
)
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument(
|
||||
"robot_name",
|
||||
default_value="arm0",
|
||||
description="Name for robot, used to apply namespace for specific robot in multirobot setup",
|
||||
)
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument(
|
||||
"start_joint_controller",
|
||||
default_value="false",
|
||||
description="Enable headless mode for robot control",
|
||||
)
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument(
|
||||
"initial_joint_controller",
|
||||
default_value="joint_trajectory_controller",
|
||||
description="Robot controller to start.",
|
||||
)
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument(
|
||||
"moveit_config_package",
|
||||
default_value="rbs_arm",
|
||||
description="MoveIt config package with robot SRDF/XACRO files. Usually the argument \
is not set; setting it enables use of a custom moveit config.",
|
||||
)
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument(
|
||||
"moveit_config_file",
|
||||
default_value="rbs_arm.srdf.xacro",
|
||||
description="MoveIt SRDF/XACRO description file with the robot.",
|
||||
)
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument(
|
||||
"use_sim_time",
|
||||
default_value="true",
|
||||
description="Make MoveIt use simulation time.\
This is needed for trajectory planning in simulation.",
|
||||
)
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument(
|
||||
"gripper_name",
|
||||
default_value="rbs_gripper",
|
||||
choices=["rbs_gripper", ""],
|
||||
description="Choose gripper by name (leave empty if none)",
|
||||
)
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument("with_gripper",
|
||||
default_value="true",
|
||||
description="With gripper or not?")
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument("sim_gazebo",
|
||||
default_value="true",
|
||||
description="Gazebo Simulation")
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument("env_manager",
|
||||
default_value="false",
|
||||
description="Launch env_manager?")
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument("launch_sim",
|
||||
default_value="true",
|
||||
description="Launch simulator (Gazebo)?\
|
||||
Most general arg")
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument("launch_moveit",
|
||||
default_value="false",
|
||||
description="Launch moveit?")
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument("launch_perception",
|
||||
default_value="false",
|
||||
description="Launch perception?")
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument("launch_task_planner",
|
||||
default_value="false",
|
||||
description="Launch task_planner?")
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument("cartesian_controllers",
|
||||
default_value="true",
|
||||
description="Load cartesian\
|
||||
controllers?")
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument("hardware",
|
||||
choices=["gazebo", "mock"],
|
||||
default_value="gazebo",
|
||||
description="Choose your hardware_interface")
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument("launch_controllers",
|
||||
default_value="true",
|
||||
description="Launch controllers?")
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument("gazebo_gui",
|
||||
default_value="true",
|
||||
description="Launch gazebo with gui?")
|
||||
)
|
||||
|
||||
return LaunchDescription(declared_arguments + [OpaqueFunction(function=launch_setup)])
|
|
@ -73,7 +73,7 @@ def launch_setup(context, *args, **kwargs):
|
|||
convert_types=False,
|
||||
).merge_yamls()
|
||||
for robot in robots["scene_config"]:
|
||||
namespace = "/" + robot["name"]
|
||||
namespace: str = "/" + robot["name"]
|
||||
ld.append(IncludeLaunchDescription(
|
||||
PythonLaunchDescriptionSource([
|
||||
PathJoinSubstitution([
|
||||
|
@ -106,9 +106,13 @@ def launch_setup(context, *args, **kwargs):
|
|||
"hardware": hardware,
|
||||
"launch_controllers": launch_controllers,
|
||||
"gazebo_gui": gazebo_gui,
|
||||
"namespace": namespace,
|
||||
"x": str(robot["pose"]["position"]["x"]),
|
||||
"y": str(robot["pose"]["position"]["y"]),
|
||||
"z": str(robot["pose"]["position"]["z"])
|
||||
"z": str(robot["pose"]["position"]["z"]),
|
||||
"roll": str(robot["pose"]["orientation"]["x"]),
|
||||
"pitch": str(robot["pose"]["orientation"]["y"]),
|
||||
"yaw": str(robot["pose"]["orientation"]["z"]),
|
||||
}.items()
|
||||
))
|
||||
|
||||
|
@ -117,9 +121,12 @@ def launch_setup(context, *args, **kwargs):
|
|||
executable='create',
|
||||
arguments=[
|
||||
'-name', robot_name,
|
||||
'-x', "0",#str(robot["spawn_point"][0]),
|
||||
'-y', "0",#str(robot["spawn_point"][1]),
|
||||
'-z', "0",#str(robot["spawn_point"][2]),
|
||||
# '-x', str(robot["pose"]["position"]["x"]),
|
||||
# '-y', str(robot["pose"]["position"]["y"]),
|
||||
# '-z', str(robot["pose"]["position"]["z"]),
|
||||
# '-R', str(robot["pose"]["orientation"]["x"]),
|
||||
# '-P', str(robot["pose"]["orientation"]["y"]),
|
||||
# '-Y', str(robot["pose"]["orientation"]["z"]),
|
||||
'-topic', namespace + '/robot_description'],
|
||||
output='screen',
|
||||
condition=IfCondition(sim_gazebo))
|
||||
|
|
|
@ -15,6 +15,8 @@ import xacro
|
|||
import os
|
||||
from ament_index_python.packages import get_package_share_directory
|
||||
|
||||
from rbs_arm import RbsBuilder
|
||||
|
||||
def launch_setup(context, *args, **kwargs):
|
||||
# Initialize Arguments
|
||||
robot_type = LaunchConfiguration("robot_type")
|
||||
|
@ -39,14 +41,18 @@ def launch_setup(context, *args, **kwargs):
|
|||
x_pos = LaunchConfiguration("x")
|
||||
y_pos = LaunchConfiguration("y")
|
||||
z_pos = LaunchConfiguration("z")
|
||||
roll = LaunchConfiguration("roll")
|
||||
pitch = LaunchConfiguration("pitch")
|
||||
yaw = LaunchConfiguration("yaw")
|
||||
namespace = LaunchConfiguration("namespace")
|
||||
robot_name = robot_name.perform(context)
|
||||
namespace = "/" + robot_name
|
||||
namespace = namespace.perform(context)
|
||||
robot_type = robot_type.perform(context)
|
||||
description_package = description_package.perform(context)
|
||||
description_file = description_file.perform(context)
|
||||
controllers_file = controllers_file.perform(context)
|
||||
|
||||
remappings = [("/tf", "tf"), ("/tf_static", "tf_static")]
|
||||
# remappings = [("/tf", "tf"), ("/tf_static", "tf_static")]
|
||||
|
||||
xacro_file = os.path.join(get_package_share_directory(description_package), "urdf", description_file)
|
||||
robot_description_doc = xacro.process_file(
|
||||
|
@ -58,10 +64,19 @@ def launch_setup(context, *args, **kwargs):
|
|||
"namespace": namespace,
|
||||
"x": x_pos.perform(context),
|
||||
"y": y_pos.perform(context),
|
||||
"z": z_pos.perform(context)
|
||||
"z": z_pos.perform(context),
|
||||
"roll": roll.perform(context),
|
||||
"pitch": pitch.perform(context),
|
||||
"yaw": yaw.perform(context)
|
||||
# TODO: add rotation and pass it, probably via a dict
|
||||
}
|
||||
)
|
||||
|
||||
# robot = RbsBuilder(6, "arm0", "world", "rbs_gripper")
|
||||
# robot.base()
|
||||
# robot.gripper()
|
||||
# robot.ros2_control()
|
||||
# robot.moveit()
|
||||
robot_description_content = robot_description_doc.toprettyxml(indent=" ")
|
||||
|
||||
robot_description = {"robot_description": robot_description_content}
|
||||
|
@ -89,8 +104,27 @@ def launch_setup(context, *args, **kwargs):
|
|||
executable="robot_state_publisher",
|
||||
namespace=namespace,
|
||||
output="both",
|
||||
remappings=remappings,
|
||||
parameters=[{"use_sim_time": True}, robot_description],
|
||||
# remappings=remappings,
|
||||
parameters=[{"use_sim_time": use_sim_time}, robot_description],
|
||||
)
|
||||
|
||||
rviz_config_file = PathJoinSubstitution(
|
||||
[FindPackageShare("rbs_bringup"), "config", "rbs.rviz"]
|
||||
)
|
||||
|
||||
rviz = Node(
|
||||
package="rviz2",
|
||||
executable="rviz2",
|
||||
name="rviz2",
|
||||
output="log",
|
||||
namespace=namespace,
|
||||
arguments=["-d", rviz_config_file],
|
||||
parameters=[
|
||||
robot_description,
|
||||
robot_description_semantic,
|
||||
robot_description_kinematics,
|
||||
{'use_sim_time': use_sim_time}
|
||||
]
|
||||
)
|
||||
|
||||
control = IncludeLaunchDescription(
|
||||
|
@ -182,7 +216,8 @@ def launch_setup(context, *args, **kwargs):
|
|||
moveit,
|
||||
skills,
|
||||
task_planner,
|
||||
perception
|
||||
perception,
|
||||
# rviz
|
||||
]
|
||||
return nodes_to_start
|
||||
|
||||
|
@ -337,6 +372,11 @@ def generate_launch_description():
|
|||
default_value="false",
|
||||
description="Launch gazebo with gui?")
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument("namespace",
|
||||
default_value="",
|
||||
description="The ROS2 namespace of a robot")
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument("x",
|
||||
default_value="0.0",
|
||||
|
@ -352,6 +392,21 @@ def generate_launch_description():
|
|||
default_value="0.0",
|
||||
description="Position of robot in world by Z")
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument("roll",
|
||||
default_value="0.0",
|
||||
description="Orientation of robot in world (roll)")
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument("pitch",
|
||||
default_value="0.0",
|
||||
description="Orientation of robot in world (pitch)")
|
||||
)
|
||||
declared_arguments.append(
|
||||
DeclareLaunchArgument("yaw",
|
||||
default_value="0.0",
|
||||
description="Orientation of robot in world (yaw)")
|
||||
)
|
||||
|
||||
|
||||
return LaunchDescription(declared_arguments + [OpaqueFunction(function=launch_setup)])
|
||||
|
|
|
@ -58,27 +58,31 @@ def launch_setup(context, *args, **kwargs):
|
|||
condition=IfCondition(launch_simulation)
|
||||
)
|
||||
|
||||
configured_params = RewrittenYaml(
|
||||
source_file=os.path.join(
|
||||
get_package_share_directory(
|
||||
description_package.perform(context)),
|
||||
"config",
|
||||
controllers_file.perform(context)),
|
||||
root_key=robot_name.perform(context),
|
||||
param_rewrites={},
|
||||
convert_types=True,
|
||||
# FIXME: namespaces
|
||||
# configured_params = RewrittenYaml(
|
||||
# source_file=os.path.join(
|
||||
# get_package_share_directory(
|
||||
# description_package.perform(context)),
|
||||
# "config",
|
||||
# controllers_file.perform(context)),
|
||||
# root_key=robot_name.perform(context),
|
||||
# param_rewrites={},
|
||||
# convert_types=True,
|
||||
# )
|
||||
|
||||
initial_joint_controllers_file_path = os.path.join(
|
||||
get_package_share_directory('rbs_arm'), 'config', 'rbs_arm0_controllers.yaml'
|
||||
)
|
||||
controller_paramfile = configured_params.perform(context)
|
||||
namespace = "/" + robot_name.perform(context)
|
||||
|
||||
# controller_paramfile = configured_params.perform(context)
|
||||
# namespace = "/" + robot_name.perform(context)
|
||||
namespace = ""
|
||||
|
||||
gz_spawner = Node(
|
||||
package='ros_gz_sim',
|
||||
executable='create',
|
||||
arguments=[
|
||||
'-name', robot_name.perform(context),
|
||||
'-x', "0.5",
|
||||
'-y', "0.5",
|
||||
'-z', "0.0",
|
||||
'-topic', namespace + '/robot_description'],
|
||||
output='screen',
|
||||
condition=IfCondition(sim_gazebo))
|
||||
|
@ -97,11 +101,11 @@ def launch_setup(context, *args, **kwargs):
|
|||
"gripper_name": gripper_name,
|
||||
"controllers_file": controllers_file,
|
||||
"robot_type": robot_type,
|
||||
"controllers_file": controller_paramfile,
|
||||
"controllers_file": initial_joint_controllers_file_path,
|
||||
"cartesian_controllers": cartesian_controllers,
|
||||
"description_package": description_package,
|
||||
"description_file": description_file,
|
||||
"robot_name": robot_name,
|
||||
"robot_name": robot_type,
|
||||
"start_joint_controller": start_joint_controller,
|
||||
"initial_joint_controller": initial_joint_controller,
|
||||
"launch_simulation": launch_simulation,
|
||||
|
@ -115,9 +119,9 @@ def launch_setup(context, *args, **kwargs):
|
|||
"hardware": hardware,
|
||||
"launch_controllers": "true",
|
||||
"gazebo_gui": gazebo_gui,
|
||||
"x": "0.5",
|
||||
"y": "0.5",
|
||||
"z": "0.5"
|
||||
# "x": "0.5",
|
||||
# "y": "0.5",
|
||||
# "z": "0.5"
|
||||
}.items()
|
||||
)
|
||||
|
||||
|
|
|
@@ -80,6 +80,9 @@ list(APPEND plugin_libs rbs_env_manager_starter)
add_library(rbs_skill_move_topose_array_bt_action_client SHARED src/MoveToPoseArray.cpp)
list(APPEND plugin_libs rbs_skill_move_topose_array_bt_action_client)

add_library(rbs_interface SHARED src/rbsBTAction.cpp)
list(APPEND plugin_libs rbs_interface)

foreach(bt_plugin ${plugin_libs})
  ament_target_dependencies(${bt_plugin} ${dependencies})
  target_compile_definitions(${bt_plugin} PRIVATE BT_PLUGIN_EXPORT)
rbs_bt_executor/bt_trees/bt_movetopose.xml (new file, 17 lines)
@@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root main_tree_to_execute="Main">
|
||||
<BehaviorTree ID="Main">
|
||||
<Sequence>
|
||||
<Action ID="RbsBtAction" do="MoveToPose" command="move" server_name="rbs_interface" server_timeout="1000" />
|
||||
</Sequence>
|
||||
</BehaviorTree>
|
||||
|
||||
<TreeNodesModel>
|
||||
<Action ID="RbsBtAction">
|
||||
<input_port name="do" />
|
||||
<input_port name="command" />
|
||||
<input_port name="server_name" />
|
||||
<input_port name="server_timeout" />
|
||||
</Action>
|
||||
</TreeNodesModel>
|
||||
</root>
|
rbs_bt_executor/bt_trees/bt_pe_stop.xml (new file, 17 lines)
@@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root main_tree_to_execute="Main">
|
||||
<BehaviorTree ID="Main">
|
||||
<Sequence>
|
||||
<Action ID="RbsBtAction" do="PoseEstimation" command="peStop" server_name="rbs_interface" server_timeout="1000" />
|
||||
</Sequence>
|
||||
</BehaviorTree>
|
||||
|
||||
<TreeNodesModel>
|
||||
<Action ID="RbsBtAction">
|
||||
<input_port name="do" />
|
||||
<input_port name="command" />
|
||||
<input_port name="server_name" />
|
||||
<input_port name="server_timeout" />
|
||||
</Action>
|
||||
</TreeNodesModel>
|
||||
</root>
|
rbs_bt_executor/bt_trees/bt_pe_test.xml (new file, 17 lines)
@@ -0,0 +1,17 @@
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root main_tree_to_execute="Main">
|
||||
<BehaviorTree ID="Main">
|
||||
<Sequence>
|
||||
<Action ID="RbsBtAction" do="PoseEstimation" command="peConfigure" server_name="rbs_interface" server_timeout="1000" />
|
||||
</Sequence>
|
||||
</BehaviorTree>
|
||||
|
||||
<TreeNodesModel>
|
||||
<Action ID="RbsBtAction">
|
||||
<input_port name="do" />
|
||||
<input_port name="command" />
|
||||
<input_port name="server_name" />
|
||||
<input_port name="server_timeout" />
|
||||
</Action>
|
||||
</TreeNodesModel>
|
||||
</root>
|
rbs_bt_executor/bt_trees/bt_template.xml (new file, 22 lines)
@@ -0,0 +1,22 @@
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<root main_tree_to_execute="Main">
|
||||
<BehaviorTree ID="Main">
|
||||
<Sequence>
|
||||
<Action ID="RbsBtAction" do="PoseEstimation" command="peConfigure" server_name="rbs_interface" server_timeout="1000" />
|
||||
<Action ID="RbsBtAction" do="PoseEstimation" command="peStop" server_name="rbs_interface" server_timeout="1000" />
|
||||
</Sequence>
|
||||
</BehaviorTree>
|
||||
|
||||
<TreeNodesModel>
|
||||
<Action ID="RbsBtAction">
|
||||
<input_port name="do" />
|
||||
<input_port name="command" />
|
||||
<input_port name="server_name" />
|
||||
<input_port name="server_timeout" />
|
||||
</Action>
|
||||
</TreeNodesModel>
|
||||
<!-- <Action ID="RbsBtAction" do="ObjectDetection" command="odConfigure" server_name="rbs_interface" server_timeout="1000" />
|
||||
<Action ID="RbsBtAction" do="ObjectDetection" command="odStop" server_name="rbs_interface" server_timeout="1000" /> -->
|
||||
<!-- <input_port name="cancel_timeout" /> -->
|
||||
<!-- cancel_timeout="1000" -->
|
||||
</root>
|
rbs_bt_executor/bt_trees/test_roboclone.xml (new file, 36 lines)
@@ -0,0 +1,36 @@
<?xml version="1.0"?>
|
||||
<root main_tree_to_execute="MainTree">
|
||||
<!-- ////////// -->
|
||||
<BehaviorTree ID="MainTree">
|
||||
<Sequence>
|
||||
<Action ID="EnvStarter" env_class="gz_enviroment::GzEnviroment" env_name="gz_enviroment"
|
||||
server_name="/env_manager/start_env" server_timeout="1000" workspace="{workspace}" />
|
||||
<SubTreePlus ID="WorkspaceInspection" __autoremap="1" goal_pose="{workspace}"
|
||||
robot_name="rbs_arm" />
|
||||
</Sequence>
|
||||
</BehaviorTree>
|
||||
<!-- ////////// -->
|
||||
<BehaviorTree ID="WorkspaceInspection">
|
||||
<Sequence>
|
||||
<Action ID="MoveToPoseArray" cancel_timeout="5000" pose_vec_in="{goal_pose}"
|
||||
pose_vec_out="{goal_pose}"
|
||||
robot_name="{robot_name}" server_name="/arm1/move_topose" server_timeout="5000" />
|
||||
<Action ID="MoveToPoseArray" cancel_timeout="5000" pose_vec_in="{goal_pose}"
|
||||
pose_vec_out="{goal_pose}"
|
||||
robot_name="{robot_name}" server_name="/arm1/move_cartesian" server_timeout="5000" />
|
||||
<Action ID="MoveToPoseArray" cancel_timeout="5000" pose_vec_in="{goal_pose}"
|
||||
pose_vec_out="{goal_pose}"
|
||||
robot_name="{robot_name}" server_name="/arm1/move_cartesian" server_timeout="5000" />
|
||||
<Action ID="MoveToPoseArray" cancel_timeout="5000" pose_vec_in="{goal_pose}"
|
||||
pose_vec_out="{goal_pose}"
|
||||
robot_name="{robot_name}" server_name="/arm1/move_cartesian" server_timeout="5000" />
|
||||
<Action ID="MoveToPoseArray" cancel_timeout="5000" pose_vec_in="{goal_pose}"
|
||||
pose_vec_out="{goal_pose}"
|
||||
robot_name="{robot_name}" server_name="/arm/move_cartesian" server_timeout="5000" />
|
||||
|
||||
</Sequence>
|
||||
</BehaviorTree>
|
||||
<!-- ////////// -->
|
||||
<include path="./nodes_interfaces/general.xml"/>
|
||||
<!-- ////////// -->
|
||||
</root>
|
|
@ -14,19 +14,19 @@
|
|||
<Sequence>
|
||||
<Action ID="MoveToPoseArray" cancel_timeout="5000" pose_vec_in="{goal_pose}"
|
||||
pose_vec_out="{goal_pose}"
|
||||
robot_name="{robot_name}" server_name="/move_topose" server_timeout="5000" />
|
||||
robot_name="{robot_name}" server_name="/cartesian_move_to_pose" server_timeout="5000" />
|
||||
<Action ID="MoveToPoseArray" cancel_timeout="5000" pose_vec_in="{goal_pose}"
|
||||
pose_vec_out="{goal_pose}"
|
||||
robot_name="{robot_name}" server_name="/move_cartesian" server_timeout="5000" />
|
||||
robot_name="{robot_name}" server_name="/cartesian_move_to_pose" server_timeout="5000" />
|
||||
<Action ID="MoveToPoseArray" cancel_timeout="5000" pose_vec_in="{goal_pose}"
|
||||
pose_vec_out="{goal_pose}"
|
||||
robot_name="{robot_name}" server_name="/move_cartesian" server_timeout="5000" />
|
||||
robot_name="{robot_name}" server_name="/cartesian_move_to_pose" server_timeout="5000" />
|
||||
<Action ID="MoveToPoseArray" cancel_timeout="5000" pose_vec_in="{goal_pose}"
|
||||
pose_vec_out="{goal_pose}"
|
||||
robot_name="{robot_name}" server_name="/move_cartesian" server_timeout="5000" />
|
||||
robot_name="{robot_name}" server_name="/cartesian_move_to_pose" server_timeout="5000" />
|
||||
<Action ID="MoveToPoseArray" cancel_timeout="5000" pose_vec_in="{goal_pose}"
|
||||
pose_vec_out="{goal_pose}"
|
||||
robot_name="{robot_name}" server_name="/move_cartesian" server_timeout="5000" />
|
||||
robot_name="{robot_name}" server_name="/cartesian_move_to_pose" server_timeout="5000" />
|
||||
|
||||
</Sequence>
|
||||
</BehaviorTree>
|
||||
|
|
|
@@ -9,6 +9,7 @@ bt_engine:
    "rbs_pose_estimation",
    "rbs_object_detection",
    "rbs_env_manager_starter",
    "rbs_skill_move_topose_array_bt_action_client"
    "rbs_skill_move_topose_array_bt_action_client",
    "rbs_interface"
  ]
|
@ -27,11 +27,16 @@ def generate_launch_description():
|
|||
Node(
|
||||
package='behavior_tree',
|
||||
executable='bt_engine',
|
||||
# prefix=['gdbserver localhost:3000'],
|
||||
# prefix=['gdbserver localhost:1234'],
|
||||
parameters=[
|
||||
btfile_param,
|
||||
bt_skills_param
|
||||
]
|
||||
bt_skills_param,
|
||||
{'use_sim_time': True}
|
||||
],
|
||||
# arguments=[
|
||||
# "--ros-args",
|
||||
# "--log-level", "debug",
|
||||
# ],
|
||||
)
|
||||
]
|
||||
|
||||
|
|
|
@@ -35,7 +35,7 @@ public:
    if (response->ok) {
      // TODO: for better performance, call this in one place for all BT nodes
      // - e.g. forward it via shared_ptr through the blackboard
      auto t = std::make_shared<rbs_utils::AssemblyConfigLoader>("asp-example2", _node);
      auto t = std::make_shared<rbs_utils::AssemblyConfigLoader>("asp-example", _node);
      auto p = t->getWorkspaceInspectorTrajectory();
      setOutput("workspace", p);
      return BT::NodeStatus::SUCCESS;
|
@@ -29,8 +29,8 @@ class MoveToPose : public BtAction<MoveToPoseAction> {

    goal.robot_name = robot_name_;
    goal.target_pose = pose_des;
    goal.end_effector_acceleration = 0.5;
    goal.end_effector_velocity = 0.5;
    goal.end_effector_acceleration = 1.0;
    goal.end_effector_velocity = 1.0;

    RCLCPP_INFO(_node->get_logger(), "Goal populated");
rbs_bt_executor/src/rbsBTAction.cpp (new file, 82 lines)
@@ -0,0 +1,82 @@
// #include "behavior_tree/BtAction.hpp"
|
||||
#include "behavior_tree/BtService.hpp"
|
||||
// #include "rbs_skill_interfaces/action/rbs_bt.hpp"
|
||||
#include "rbs_skill_interfaces/srv/rbs_bt.hpp"
|
||||
|
||||
// using RbsBtActionSrv = rbs_skill_interfaces::action::RbsBt;
|
||||
using RbsBtActionSrv = rbs_skill_interfaces::srv::RbsBt;
|
||||
|
||||
class RbsBtAction : public BtService<RbsBtActionSrv> {
|
||||
public:
|
||||
RbsBtAction(const std::string &name, const BT::NodeConfiguration &config)
|
||||
: BtService<RbsBtActionSrv>(name, config) {
|
||||
|
||||
_action_name = getInput<std::string>("do").value();
|
||||
RCLCPP_INFO_STREAM(_node->get_logger(), "Start node RbsBtAction: " + _action_name);
|
||||
|
||||
_set_params_client = std::make_shared<rclcpp::AsyncParametersClient>(_node, "rbs_interface");
|
||||
|
||||
while (!_set_params_client->wait_for_service(1s)) {
|
||||
if (!rclcpp::ok()) {
|
||||
RCLCPP_ERROR(_node->get_logger(), "Interrupted while waiting for the service. Exiting.");
|
||||
break;
|
||||
}
|
||||
RCLCPP_WARN(_node->get_logger(), "'rbs_interface' service not available, waiting again...");
|
||||
}
|
||||
// set_str_param();
|
||||
}
|
||||
|
||||
// RbsBtActionSrv::Goal populate_goal() override {
|
||||
// auto goal = RbsBtActionSrv::Goal();
|
||||
// goal.action = _action_name;
|
||||
// goal.command = getInput<std::string>("command").value();
|
||||
// RCLCPP_INFO_STREAM(_node->get_logger(), "Goal send " + _action_name);
|
||||
// return goal;
|
||||
// }
|
||||
|
||||
RbsBtActionSrv::Request::SharedPtr populate_request() override {
|
||||
auto request = std::make_shared<RbsBtActionSrv::Request>();
|
||||
request->action = _action_name;
|
||||
request->command = getInput<std::string>("command").value();
|
||||
// RCLCPP_INFO_STREAM(_node->get_logger(), "RbsBtAction:populate_request");
|
||||
|
||||
return request;
|
||||
}
|
||||
BT::NodeStatus handle_response(const RbsBtActionSrv::Response::SharedPtr response) override {
|
||||
if (response->ok) {
|
||||
// TODO: for better performance, call this in one place for all BT nodes
// - e.g. forward it via shared_ptr through the blackboard
|
||||
// auto t = std::make_shared<rbs_utils::AssemblyConfigLoader>("asp-example2", _node);
|
||||
// auto p = t->getWorkspaceInspectorTrajectory();
|
||||
// setOutput("workspace", p);
|
||||
// RCLCPP_INFO_STREAM(_node->get_logger(), "RbsBtAction:handle_response");
|
||||
return BT::NodeStatus::SUCCESS;
|
||||
}
|
||||
return BT::NodeStatus::FAILURE;
|
||||
}
|
||||
|
||||
static BT::PortsList providedPorts() {
|
||||
return providedBasicPorts({
|
||||
BT::InputPort<std::string>("do"),
|
||||
BT::InputPort<std::string>("command")
|
||||
});
|
||||
}
|
||||
|
||||
private:
|
||||
std::string _action_name{};
|
||||
std::shared_ptr<rclcpp::AsyncParametersClient> _set_params_client;
|
||||
|
||||
// void set_str_param() {
|
||||
// RCLCPP_INFO_STREAM(_node->get_logger(),"Set string parameter: <" + _action_name + ">");
|
||||
|
||||
// std::vector<rclcpp::Parameter> _parameters{rclcpp::Parameter("action_name", _action_name)};
|
||||
// _set_params_client->set_parameters(_parameters);
|
||||
// }
|
||||
// auto _package_name = ament_index_cpp::get_package_share_directory("rbs_perception");
|
||||
};
|
||||
|
||||
#include "behaviortree_cpp_v3/bt_factory.h"
|
||||
|
||||
BT_REGISTER_NODES(factory) {
|
||||
factory.registerNodeType<RbsBtAction>("RbsBtAction");
|
||||
}
|
|
@ -37,6 +37,8 @@ install(PROGRAMS
|
|||
scripts/grasp_marker_publish.py
|
||||
scripts/pose_estimation.py
|
||||
scripts/pose_estimation_lifecycle.py
|
||||
scripts/pe_dope_lc.py
|
||||
scripts/rbs_interface.py
|
||||
DESTINATION lib/${PROJECT_NAME}
|
||||
)
|
||||
|
||||
|
@ -70,7 +72,6 @@ install(
|
|||
DESTINATION share/${PROJECT_NAME}
|
||||
)
|
||||
|
||||
|
||||
install(TARGETS
|
||||
pc_filter
|
||||
DESTINATION lib/${PROJECT_NAME}
|
||||
|
|
rbs_perception/config/MoveToPose.json (new file, 24 lines)
@@ -0,0 +1,24 @@
{
  "SkillPackage": {
    "name": "Robossembler", "version": "1.0", "format": "1"
  },
  "Module": {
    "name": "MoveToPose", "description": "Move to Pose skill with Cartesian controllers"
  },
  "Launch": {
    "executable": "movetopose_lc.py"
  },
  "ROS2": {
    "node_name": "lc_dc"
  },
  "BTAction": [
    {
      "name": "move",
      "format": "yaml",
      "type": "run",
      "param": ["robot_name", "pose"],
      "result": []
    }
  ],
  "Settings": []
}
rbs_perception/config/PoseEstimation.json (new file, 47 lines)
@@ -0,0 +1,47 @@
{
  "SkillPackage": {
    "name": "Robossembler", "version": "1.0", "format": "1"
  },
  "Module": {
    "name": "PoseEstimation", "description": "Pose Estimation skill with DOPE"
  },
  "Launch": {
    "executable": "pe_dope_lc.py"
  },
  "ROS2": {
    "node_name": "lc_dope"
  },
  "BTAction": [
    {
      "name": "peConfigure",
      "format": "yaml",
      "type": "run",
      "param": ["object_name", "weights_file"],
      "result": ["Pose"]
    },
    {
      "name": "peStop",
      "format": "yaml",
      "type": "stop",
      "param": [],
      "result": []
    }
  ],
  "Interface": {
    "Input": [
      {
        "name": "cameraLink", "type": "CAMERA"
      },
      {
        "name": "object_name", "type": "MODEL"
      }
    ],
    "Output": [
      {
        "name": "pose_estimation_topic", "type": "Pose"
      }
    ]
  },
  "Settings": []
}
rbs_perception/config/move.yaml (new file, 3 lines)
@@ -0,0 +1,3 @@
robot_name: arm1
# loc_xyz, rot_euler
pose: [0.137, 0.165, 0.202, 0.0, 0.0, 3.14]
rbs_perception/config/peConfigure.yaml (new file, 3 lines)
@@ -0,0 +1,3 @@
object_name: fork
weights_file: /home/shalenikol/robossembler_ws/fork_e47.pth
dimensions: [0.137, 0.165, 0.202]
rbs_perception/config/pe_dope_config.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
input_is_rectified: True   # Whether the input image is rectified (strongly suggested!)
downscale_height: 480      # if the input image is larger than this, scale it down to this pixel height

# # Cuboid dimension in cm x,y,z
# dimensions: {
#   "model1": [13.7, 16.5, 20.2],
#   "model2": [10.0, 10.0, 22.64],
# }

# class_ids: {
#   "model1": 1,
#   "model2": 2,
# }

# draw_colors: {
#   "model1": [13, 255, 128],   # green
#   "model2": [255, 255, 255],  # white
# }

# Config params for DOPE
thresh_angle: 0.5
thresh_map: 0.01
sigma: 3
thresh_points: 0.0
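The lifecycle node named in PoseEstimation.json (pe_dope_lc.py) is not shown in this diff. Below is a minimal sketch of how it might combine pe_dope_config.yaml with a peConfigure request; the load_dope_settings name and the file paths are assumptions for illustration only.

# Illustrative sketch only; pe_dope_lc.py itself is not part of this section.
import yaml

def load_dope_settings(config_path="pe_dope_config.yaml",
                       request_path="peConfigure.yaml"):
    # Detector parameters: thresh_angle, thresh_map, sigma, thresh_points, ...
    with open(config_path) as f:
        params = yaml.safe_load(f)
    # Per-request data: object_name, weights_file, cuboid dimensions
    with open(request_path) as f:
        request = yaml.safe_load(f)
    params["weights"] = {request["object_name"]: request["weights_file"]}
    params["dimensions"] = {request["object_name"]: request["dimensions"]}
    return params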
rbs_perception/config/skill_scheme.json (new file, 34 lines)
@@ -0,0 +1,34 @@
ENUM CFG_FORMAT = "yaml","json"
|
||||
ENUM ACTION_TYPE = "run","stop"
|
||||
|
||||
BTACTION = {
|
||||
"name": ${NAME:string:"Configure"},
|
||||
"format": ${enum:CFG_FORMAT:"yaml"},
|
||||
"type": ${enum:ACTION_TYPE:"run"},
|
||||
"param": ${ARRAY:string:[]},
|
||||
"result": ${ARRAY:string:[]}
|
||||
}
|
||||
SETTING = {
|
||||
"name": ${NAME:string:""},
|
||||
"value": ${VALUE:string:""}
|
||||
}
|
||||
|
||||
{
|
||||
"SkillPackage": {
|
||||
"name": ${NAME:constant:"Robossembler"},
|
||||
"version": ${VERSION:constant:"1.0"},
|
||||
"format": ${FORMAT:constant:"1"}
|
||||
},
|
||||
"Module": {
|
||||
"name": ${NAME:string:"skill_scheme"},
|
||||
"description": ${DESCRIPTION:string:"Configuration scheme of a skill"}
|
||||
},
|
||||
"Launch": {
|
||||
"executable": ${EXE:string:"node_lc.py"}
|
||||
},
|
||||
"ROS2": {
|
||||
"node_name": ${NODE_NAME:string:"lc_implementation"}
|
||||
},
|
||||
"BTAction": ${ARRAY:BTACTION:[]},
|
||||
"Settings": ${ARRAY:SETTING:[]}
|
||||
}
|
|
@ -4,6 +4,10 @@ from launch import LaunchDescription
|
|||
def generate_launch_description():
|
||||
declared_arguments = []
|
||||
|
||||
rbs_interface = Node(
|
||||
package="rbs_perception",
|
||||
executable="rbs_interface.py",
|
||||
)
|
||||
pose_estimation = Node(
|
||||
package="rbs_perception",
|
||||
executable="pose_estimation_lifecycle.py",
|
||||
|
@ -14,6 +18,7 @@ def generate_launch_description():
|
|||
)
|
||||
|
||||
nodes_to_start = [
|
||||
rbs_interface,
|
||||
pose_estimation,
|
||||
object_detection,
|
||||
]
|
||||
|
|
rbs_perception/scripts/cuboid.py (new executable file, 126 lines)
@@ -0,0 +1,126 @@
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
|
||||
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
|
||||
# https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
|
||||
|
||||
from enum import IntEnum, unique
|
||||
import numpy as np
|
||||
import cv2
|
||||
|
||||
# Related to the object's local coordinate system
|
||||
# @unique
|
||||
class CuboidVertexType(IntEnum):
|
||||
FrontTopRight = 0
|
||||
FrontTopLeft = 1
|
||||
FrontBottomLeft = 2
|
||||
FrontBottomRight = 3
|
||||
RearTopRight = 4
|
||||
RearTopLeft = 5
|
||||
RearBottomLeft = 6
|
||||
RearBottomRight = 7
|
||||
Center = 8
|
||||
TotalCornerVertexCount = 8  # Corner vertices don't include the center point
|
||||
TotalVertexCount = 9
|
||||
|
||||
|
||||
# List of the vertex indexes in each line edges of the cuboid
|
||||
CuboidLineIndexes = [
|
||||
# Front face
|
||||
[CuboidVertexType.FrontTopLeft, CuboidVertexType.FrontTopRight],
|
||||
[CuboidVertexType.FrontTopRight, CuboidVertexType.FrontBottomRight],
|
||||
[CuboidVertexType.FrontBottomRight, CuboidVertexType.FrontBottomLeft],
|
||||
[CuboidVertexType.FrontBottomLeft, CuboidVertexType.FrontTopLeft],
|
||||
# Back face
|
||||
[CuboidVertexType.RearTopLeft, CuboidVertexType.RearTopRight],
|
||||
[CuboidVertexType.RearTopRight, CuboidVertexType.RearBottomRight],
|
||||
[CuboidVertexType.RearBottomRight, CuboidVertexType.RearBottomLeft],
|
||||
[CuboidVertexType.RearBottomLeft, CuboidVertexType.RearTopLeft],
|
||||
# Left face
|
||||
[CuboidVertexType.FrontBottomLeft, CuboidVertexType.RearBottomLeft],
|
||||
[CuboidVertexType.FrontTopLeft, CuboidVertexType.RearTopLeft],
|
||||
# Right face
|
||||
[CuboidVertexType.FrontBottomRight, CuboidVertexType.RearBottomRight],
|
||||
[CuboidVertexType.FrontTopRight, CuboidVertexType.RearTopRight],
|
||||
]
|
||||
|
||||
|
||||
# ========================= Cuboid3d =========================
|
||||
class Cuboid3d:
|
||||
"""This class contains a 3D cuboid."""
|
||||
|
||||
# Create a box with a certain size
|
||||
def __init__(
|
||||
self,
|
||||
size3d=[1.0, 1.0, 1.0],
|
||||
center_location=[0, 0, 0],
|
||||
coord_system=None,
|
||||
parent_object=None,
|
||||
):
|
||||
|
||||
# NOTE: This local coordinate system is similar
|
||||
# to the intrinsic transform matrix of a 3d object
|
||||
self.center_location = center_location
|
||||
self.coord_system = coord_system
|
||||
self.size3d = size3d
|
||||
self._vertices = [0, 0, 0] * CuboidVertexType.TotalVertexCount
|
||||
|
||||
self.generate_vertexes()
|
||||
|
||||
def get_vertex(self, vertex_type):
|
||||
"""Returns the location of a vertex.
|
||||
|
||||
Args:
|
||||
vertex_type: enum of type CuboidVertexType
|
||||
|
||||
Returns:
|
||||
Numpy array(3) - Location of the vertex type in the cuboid
|
||||
"""
|
||||
return self._vertices[vertex_type]
|
||||
|
||||
def get_vertices(self):
|
||||
return self._vertices
|
||||
|
||||
def generate_vertexes(self):
|
||||
width, height, depth = self.size3d
|
||||
|
||||
# By default just use the normal OpenCV coordinate system
|
||||
if self.coord_system is None:
|
||||
cx, cy, cz = self.center_location
|
||||
# X axis point to the right
|
||||
right = cx + width / 2.0
|
||||
left = cx - width / 2.0
|
||||
# Y axis point downward
|
||||
top = cy - height / 2.0
|
||||
bottom = cy + height / 2.0
|
||||
# Z axis point forward
|
||||
front = cz + depth / 2.0
|
||||
rear = cz - depth / 2.0
|
||||
|
||||
# List of 8 vertices of the box
|
||||
self._vertices = [
|
||||
[right, top, front], # Front Top Right
|
||||
[left, top, front], # Front Top Left
|
||||
[left, bottom, front], # Front Bottom Left
|
||||
[right, bottom, front], # Front Bottom Right
|
||||
[right, top, rear], # Rear Top Right
|
||||
[left, top, rear], # Rear Top Left
|
||||
[left, bottom, rear], # Rear Bottom Left
|
||||
[right, bottom, rear], # Rear Bottom Right
|
||||
self.center_location, # Center
|
||||
]
|
||||
else:
|
||||
sx, sy, sz = self.size3d
|
||||
forward = np.array(self.coord_system.forward, dtype=float) * sy * 0.5
|
||||
up = np.array(self.coord_system.up, dtype=float) * sz * 0.5
|
||||
right = np.array(self.coord_system.right, dtype=float) * sx * 0.5
|
||||
center = np.array(self.center_location, dtype=float)
|
||||
self._vertices = [
|
||||
center + forward + up + right, # Front Top Right
|
||||
center + forward + up - right, # Front Top Left
|
||||
center + forward - up - right, # Front Bottom Left
|
||||
center + forward - up + right, # Front Bottom Right
|
||||
center - forward + up + right, # Rear Top Right
|
||||
center - forward + up - right, # Rear Top Left
|
||||
center - forward - up - right, # Rear Bottom Left
|
||||
center - forward - up + right, # Rear Bottom Right
|
||||
self.center_location, # Center
|
||||
]
|
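For orientation, a usage sketch of the two vendored DOPE helpers (cuboid.py above, cuboid_pnp_solver.py below). The camera matrix and the 2D vertex coordinates are placeholder values; only the call pattern is taken from the code in this PR.

# Sketch: estimate a 6D pose from projected cuboid vertices.
import numpy as np
from cuboid import Cuboid3d
from cuboid_pnp_solver import CuboidPNPSolver

cuboid = Cuboid3d(size3d=[0.137, 0.165, 0.202])  # dimensions from peConfigure.yaml
camera_matrix = np.array([[615.0, 0.0, 320.0],   # placeholder intrinsics
                          [0.0, 615.0, 240.0],
                          [0.0, 0.0, 1.0]])
solver = CuboidPNPSolver("fork",
                         camera_intrinsic_matrix=camera_matrix,
                         cuboid3d=cuboid)
# Nine image points (8 corners + center), e.g. taken from the DOPE belief maps
vertices_2d = [(100, 80), (220, 82), (218, 200), (98, 198),
               (120, 95), (240, 97), (238, 215), (118, 213), (169, 148)]
location, quaternion, projected = solver.solve_pnp(vertices_2d)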
rbs_perception/scripts/cuboid_pnp_solver.py (new executable file, 146 lines)
@@ -0,0 +1,146 @@
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
|
||||
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
|
||||
# https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
|
||||
|
||||
import cv2
|
||||
import numpy as np
|
||||
from cuboid import CuboidVertexType
|
||||
from pyrr import Quaternion
|
||||
|
||||
|
||||
class CuboidPNPSolver(object):
|
||||
"""
|
||||
This class is used to find the 6-DoF pose of a cuboid given its projected vertices.
|
||||
|
||||
Runs perspective-n-point (PNP) algorithm.
|
||||
"""
|
||||
|
||||
# Class variables
|
||||
cv2version = cv2.__version__.split(".")
|
||||
cv2majorversion = int(cv2version[0])
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
object_name="",
|
||||
camera_intrinsic_matrix=None,
|
||||
cuboid3d=None,
|
||||
dist_coeffs=np.zeros((4, 1)),
|
||||
):
|
||||
self.object_name = object_name
|
||||
if camera_intrinsic_matrix is not None:
|
||||
self._camera_intrinsic_matrix = camera_intrinsic_matrix
|
||||
else:
|
||||
self._camera_intrinsic_matrix = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 0]])
|
||||
self._cuboid3d = cuboid3d
|
||||
|
||||
self._dist_coeffs = dist_coeffs
|
||||
|
||||
def set_camera_intrinsic_matrix(self, new_intrinsic_matrix):
|
||||
"""Sets the camera intrinsic matrix"""
|
||||
self._camera_intrinsic_matrix = new_intrinsic_matrix
|
||||
|
||||
def set_dist_coeffs(self, dist_coeffs):
|
||||
"""Sets the camera intrinsic matrix"""
|
||||
self._dist_coeffs = dist_coeffs
|
||||
|
||||
def solve_pnp(self, cuboid2d_points, pnp_algorithm=None):
|
||||
"""
|
||||
Detects the rotation and translation
|
||||
of a cuboid object from its vertices'
|
||||
2D location in the image
|
||||
"""
|
||||
|
||||
# Fall back to the default PnP algorithm based on the OpenCV version
|
||||
if pnp_algorithm is None:
|
||||
if CuboidPNPSolver.cv2majorversion == 2:
|
||||
pnp_algorithm = cv2.CV_ITERATIVE
|
||||
elif CuboidPNPSolver.cv2majorversion == 3:
|
||||
pnp_algorithm = cv2.SOLVEPNP_ITERATIVE
|
||||
|
||||
if pnp_algorithm is None:
|
||||
pnp_algorithm = cv2.SOLVEPNP_EPNP
|
||||
|
||||
location = None
|
||||
quaternion = None
|
||||
projected_points = cuboid2d_points
|
||||
|
||||
cuboid3d_points = np.array(self._cuboid3d.get_vertices())
|
||||
obj_2d_points = []
|
||||
obj_3d_points = []
|
||||
|
||||
for i in range(CuboidVertexType.TotalVertexCount):
|
||||
check_point_2d = cuboid2d_points[i]
|
||||
# Ignore invalid points
|
||||
if check_point_2d is None:
|
||||
continue
|
||||
obj_2d_points.append(check_point_2d)
|
||||
obj_3d_points.append(cuboid3d_points[i])
|
||||
|
||||
obj_2d_points = np.array(obj_2d_points, dtype=float)
|
||||
obj_3d_points = np.array(obj_3d_points, dtype=float)
|
||||
|
||||
valid_point_count = len(obj_2d_points)
|
||||
|
||||
# PnP can only be run if we have at least 4 valid points
|
||||
is_points_valid = valid_point_count >= 4
|
||||
|
||||
if is_points_valid:
|
||||
|
||||
ret, rvec, tvec = cv2.solvePnP(
|
||||
obj_3d_points,
|
||||
obj_2d_points,
|
||||
self._camera_intrinsic_matrix,
|
||||
self._dist_coeffs,
|
||||
flags=pnp_algorithm,
|
||||
)
|
||||
|
||||
if ret:
|
||||
location = list(x[0] for x in tvec)
|
||||
quaternion = self.convert_rvec_to_quaternion(rvec)
|
||||
|
||||
projected_points, _ = cv2.projectPoints(
|
||||
cuboid3d_points,
|
||||
rvec,
|
||||
tvec,
|
||||
self._camera_intrinsic_matrix,
|
||||
self._dist_coeffs,
|
||||
)
|
||||
projected_points = np.squeeze(projected_points)
|
||||
|
||||
# If the location.Z is negative or object is behind the camera then flip both location and rotation
|
||||
x, y, z = location
|
||||
if z < 0:
|
||||
# Get the opposite location
|
||||
location = [-x, -y, -z]
|
||||
|
||||
# Change the rotation by 180 degree
|
||||
rotate_angle = np.pi
|
||||
rotate_quaternion = Quaternion.from_axis_rotation(
|
||||
location, rotate_angle
|
||||
)
|
||||
quaternion = rotate_quaternion.cross(quaternion)
|
||||
|
||||
return location, quaternion, projected_points
|
||||
|
||||
def convert_rvec_to_quaternion(self, rvec):
|
||||
"""Convert rvec (which is log quaternion) to quaternion"""
|
||||
theta = np.sqrt(
|
||||
rvec[0] * rvec[0] + rvec[1] * rvec[1] + rvec[2] * rvec[2]
|
||||
) # in radians
|
||||
raxis = [rvec[0] / theta, rvec[1] / theta, rvec[2] / theta]
|
||||
|
||||
# pyrr's Quaternion (order is XYZW), https://pyrr.readthedocs.io/en/latest/oo_api_quaternion.html
|
||||
return Quaternion.from_axis_rotation(raxis, theta)
|
||||
|
||||
def project_points(self, rvec, tvec):
|
||||
"""Project points from model onto image using rotation, translation"""
|
||||
output_points, tmp = cv2.projectPoints(
|
||||
np.array(self._cuboid3d.get_vertices()),
|
||||
rvec,
|
||||
tvec,
|
||||
self._camera_intrinsic_matrix,
|
||||
self._dist_coeffs,
|
||||
)
|
||||
|
||||
output_points = np.squeeze(output_points)
|
||||
return output_points
|
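For orientation, a minimal sketch (not part of the diff) of how the solver above is driven. The camera matrix and the nine 2D keypoints below are synthetic placeholders; at least four of the points must be non-None for solve_pnp() to run:

import numpy as np
from cuboid import Cuboid3d
from cuboid_pnp_solver import CuboidPNPSolver

# synthetic pinhole intrinsics (placeholder values)
K = np.array([[600.0, 0.0, 320.0],
              [0.0, 600.0, 240.0],
              [0.0, 0.0, 1.0]])
solver = CuboidPNPSolver("fork",
                         camera_intrinsic_matrix=K,
                         cuboid3d=Cuboid3d([13.7, 16.5, 20.2]))

# nine (x, y) image points in the vertex order of Cuboid3d (8 corners + centroid);
# undetected keypoints may be passed as None
points2d = [(310, 200), (280, 205), (282, 260), (312, 255),
            (330, 210), (300, 215), (302, 270), (332, 265), (306, 235)]
location, quaternion, projected = solver.solve_pnp(points2d)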
900
rbs_perception/scripts/detector.py
Executable file
@ -0,0 +1,900 @@
|
|||
# Copyright (c) 2018 NVIDIA Corporation. All rights reserved.
|
||||
# This work is licensed under a Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
|
||||
# https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode
|
||||
|
||||
"""
|
||||
Contains the following classes:
|
||||
- ModelData - High level information encapsulation
|
||||
- ObjectDetector - Greedy algorithm to build cuboids from belief maps
|
||||
"""
|
||||
|
||||
import time
|
||||
|
||||
import sys
|
||||
from os import path
|
||||
|
||||
import numpy as np
|
||||
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torchvision.transforms as transforms
|
||||
from torch.autograd import Variable
|
||||
import torchvision.models as models
|
||||
|
||||
from scipy.ndimage.filters import gaussian_filter
|
||||
from scipy import optimize
|
||||
|
||||
import sys
|
||||
|
||||
sys.path.append("../")
|
||||
from models import *
|
||||
|
||||
# Import the definition of the neural network model and cuboids
|
||||
from cuboid_pnp_solver import *
|
||||
|
||||
# global transform for image input
|
||||
transform = transforms.Compose(
|
||||
[
|
||||
# transforms.Scale(IMAGE_SIZE),
|
||||
# transforms.CenterCrop((imagesize,imagesize)),
|
||||
transforms.ToTensor(),
|
||||
# transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
|
||||
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
# ================================ Models ================================
|
||||
|
||||
|
||||
class DopeNetwork(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
numBeliefMap=9,
|
||||
numAffinity=16,
|
||||
stop_at_stage=6, # number of stages to process (if less than total number of stages)
|
||||
):
|
||||
super(DopeNetwork, self).__init__()
|
||||
|
||||
self.stop_at_stage = stop_at_stage
|
||||
|
||||
vgg_full = models.vgg19(pretrained=False).features
|
||||
self.vgg = nn.Sequential()
|
||||
for i_layer in range(24):
|
||||
self.vgg.add_module(str(i_layer), vgg_full[i_layer])
|
||||
|
||||
# Add some layers
|
||||
i_layer = 23
|
||||
self.vgg.add_module(
|
||||
str(i_layer), nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1)
|
||||
)
|
||||
self.vgg.add_module(str(i_layer + 1), nn.ReLU(inplace=True))
|
||||
self.vgg.add_module(
|
||||
str(i_layer + 2), nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
|
||||
)
|
||||
self.vgg.add_module(str(i_layer + 3), nn.ReLU(inplace=True))
|
||||
|
||||
# print('---Belief------------------------------------------------')
|
||||
# _2 are the belief map stages
|
||||
self.m1_2 = DopeNetwork.create_stage(128, numBeliefMap, True)
|
||||
self.m2_2 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numBeliefMap, False
|
||||
)
|
||||
self.m3_2 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numBeliefMap, False
|
||||
)
|
||||
self.m4_2 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numBeliefMap, False
|
||||
)
|
||||
self.m5_2 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numBeliefMap, False
|
||||
)
|
||||
self.m6_2 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numBeliefMap, False
|
||||
)
|
||||
|
||||
# print('---Affinity----------------------------------------------')
|
||||
# _1 are the affinity map stages
|
||||
self.m1_1 = DopeNetwork.create_stage(128, numAffinity, True)
|
||||
self.m2_1 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numAffinity, False
|
||||
)
|
||||
self.m3_1 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numAffinity, False
|
||||
)
|
||||
self.m4_1 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numAffinity, False
|
||||
)
|
||||
self.m5_1 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numAffinity, False
|
||||
)
|
||||
self.m6_1 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numAffinity, False
|
||||
)
|
||||
|
||||
def forward(self, x):
|
||||
"""Runs inference on the neural network"""
|
||||
|
||||
out1 = self.vgg(x)
|
||||
|
||||
out1_2 = self.m1_2(out1)
|
||||
out1_1 = self.m1_1(out1)
|
||||
|
||||
if self.stop_at_stage == 1:
|
||||
return [out1_2], [out1_1]
|
||||
|
||||
out2 = torch.cat([out1_2, out1_1, out1], 1)
|
||||
out2_2 = self.m2_2(out2)
|
||||
out2_1 = self.m2_1(out2)
|
||||
|
||||
if self.stop_at_stage == 2:
|
||||
return [out1_2, out2_2], [out1_1, out2_1]
|
||||
|
||||
out3 = torch.cat([out2_2, out2_1, out1], 1)
|
||||
out3_2 = self.m3_2(out3)
|
||||
out3_1 = self.m3_1(out3)
|
||||
|
||||
if self.stop_at_stage == 3:
|
||||
return [out1_2, out2_2, out3_2], [out1_1, out2_1, out3_1]
|
||||
|
||||
out4 = torch.cat([out3_2, out3_1, out1], 1)
|
||||
out4_2 = self.m4_2(out4)
|
||||
out4_1 = self.m4_1(out4)
|
||||
|
||||
if self.stop_at_stage == 4:
|
||||
return [out1_2, out2_2, out3_2, out4_2], [out1_1, out2_1, out3_1, out4_1]
|
||||
|
||||
out5 = torch.cat([out4_2, out4_1, out1], 1)
|
||||
out5_2 = self.m5_2(out5)
|
||||
out5_1 = self.m5_1(out5)
|
||||
|
||||
if self.stop_at_stage == 5:
|
||||
return [out1_2, out2_2, out3_2, out4_2, out5_2], [
|
||||
out1_1,
|
||||
out2_1,
|
||||
out3_1,
|
||||
out4_1,
|
||||
out5_1,
|
||||
]
|
||||
|
||||
out6 = torch.cat([out5_2, out5_1, out1], 1)
|
||||
out6_2 = self.m6_2(out6)
|
||||
out6_1 = self.m6_1(out6)
|
||||
|
||||
return [out1_2, out2_2, out3_2, out4_2, out5_2, out6_2], [
|
||||
out1_1,
|
||||
out2_1,
|
||||
out3_1,
|
||||
out4_1,
|
||||
out5_1,
|
||||
out6_1,
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
def create_stage(in_channels, out_channels, first=False):
|
||||
"""Create the neural network layers for a single stage."""
|
||||
|
||||
model = nn.Sequential()
|
||||
mid_channels = 128
|
||||
if first:
|
||||
padding = 1
|
||||
kernel = 3
|
||||
count = 6
|
||||
final_channels = 512
|
||||
else:
|
||||
padding = 3
|
||||
kernel = 7
|
||||
count = 10
|
||||
final_channels = mid_channels
|
||||
|
||||
# First convolution
|
||||
model.add_module(
|
||||
"0",
|
||||
nn.Conv2d(
|
||||
in_channels, mid_channels, kernel_size=kernel, stride=1, padding=padding
|
||||
),
|
||||
)
|
||||
|
||||
# Middle convolutions
|
||||
i = 1
|
||||
while i < count - 1:
|
||||
model.add_module(str(i), nn.ReLU(inplace=True))
|
||||
i += 1
|
||||
model.add_module(
|
||||
str(i),
|
||||
nn.Conv2d(
|
||||
mid_channels,
|
||||
mid_channels,
|
||||
kernel_size=kernel,
|
||||
stride=1,
|
||||
padding=padding,
|
||||
),
|
||||
)
|
||||
i += 1
|
||||
|
||||
# Penultimate convolution
|
||||
model.add_module(str(i), nn.ReLU(inplace=True))
|
||||
i += 1
|
||||
model.add_module(
|
||||
str(i), nn.Conv2d(mid_channels, final_channels, kernel_size=1, stride=1)
|
||||
)
|
||||
i += 1
|
||||
|
||||
# Last convolution
|
||||
model.add_module(str(i), nn.ReLU(inplace=True))
|
||||
i += 1
|
||||
model.add_module(
|
||||
str(i), nn.Conv2d(final_channels, out_channels, kernel_size=1, stride=1)
|
||||
)
|
||||
i += 1
|
||||
|
||||
return model
|
||||
|
||||
|
||||
class ModelData(object):
|
||||
"""This class contains methods for loading the neural network"""
|
||||
|
||||
def __init__(self, name="", net_path="", gpu_id=0, architecture="dope"):
|
||||
self.name = name
|
||||
self.net_path = net_path # Path to trained network model
|
||||
self.net = None # Trained network
|
||||
self.gpu_id = gpu_id
|
||||
self.architecture = architecture
|
||||
|
||||
def get_net(self):
|
||||
"""Returns network"""
|
||||
if not self.net:
|
||||
self.load_net_model()
|
||||
return self.net
|
||||
|
||||
def load_net_model(self):
|
||||
"""Loads network model from disk"""
|
||||
if not self.net and path.exists(self.net_path):
|
||||
self.net = self.load_net_model_path(self.net_path)
|
||||
if not path.exists(self.net_path):
|
||||
print("ERROR: Unable to find model weights: '{}'".format(self.net_path))
|
||||
exit(0)
|
||||
|
||||
def load_net_model_path(self, path):
|
||||
"""Loads network model from disk with given path"""
|
||||
model_loading_start_time = time.time()
|
||||
print("Loading DOPE model '{}'...".format(path))
|
||||
net = DopeNetwork()
|
||||
|
||||
net = torch.nn.DataParallel(net, [0]).cuda()
|
||||
net.load_state_dict(torch.load(path))
|
||||
net.eval()
|
||||
print(
|
||||
" Model loaded in {:.2f} seconds.".format(
|
||||
time.time() - model_loading_start_time
|
||||
)
|
||||
)
|
||||
return net
|
||||
|
||||
def __str__(self):
|
||||
"""Converts to string"""
|
||||
return "{}: {}".format(self.name, self.net_path)
|
||||
|
||||
|
||||
# ================================ ObjectDetector ================================
|
||||
class ObjectDetector(object):
|
||||
"""This class contains methods for object detection"""
|
||||
|
||||
@staticmethod
|
||||
def gaussian(height, center_x, center_y, width_x, width_y):
|
||||
"""Returns a gaussian function with the given parameters"""
|
||||
width_x = float(width_x)
|
||||
width_y = float(width_y)
|
||||
return lambda x, y: height * np.exp(
|
||||
-(((center_x - x) / width_x) ** 2 + ((center_y - y) / width_y) ** 2) / 2
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def moments(data):
|
||||
"""Returns (height, x, y, width_x, width_y)
|
||||
the gaussian parameters of a 2D distribution by calculating its
|
||||
moments"""
|
||||
total = data.sum()
|
||||
X, Y = np.indices(data.shape)
|
||||
x = (X * data).sum() / total
|
||||
y = (Y * data).sum() / total
|
||||
col = data[:, int(y)]
|
||||
width_x = np.sqrt(
|
||||
np.abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum()
|
||||
)
|
||||
row = data[int(x), :]
|
||||
width_y = np.sqrt(
|
||||
np.abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum()
|
||||
)
|
||||
height = data.max()
|
||||
return height, x, y, width_x, width_y
|
||||
|
||||
@staticmethod
|
||||
def fitgaussian(data):
|
||||
"""Returns (height, x, y, width_x, width_y)
|
||||
the gaussian parameters of a 2D distribution found by a fit"""
|
||||
params = ObjectDetector.moments(data)
|
||||
errorfunction = lambda p: np.ravel(
|
||||
ObjectDetector.gaussian(*p)(*np.indices(data.shape)) - data
|
||||
)
|
||||
p, success = optimize.leastsq(errorfunction, params)
|
||||
return p
|
||||
|
||||
@staticmethod
|
||||
def make_grid(
|
||||
tensor,
|
||||
nrow=8,
|
||||
padding=2,
|
||||
normalize=False,
|
||||
range_=None,
|
||||
scale_each=False,
|
||||
pad_value=0,
|
||||
):
|
||||
"""Make a grid of images.
|
||||
Args:
|
||||
tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
|
||||
or a list of images all of the same size.
|
||||
nrow (int, optional): Number of images displayed in each row of the grid.
|
||||
The final grid size is (B / nrow, nrow). Default is 8.
|
||||
padding (int, optional): amount of padding. Default is 2.
|
||||
normalize (bool, optional): If True, shift the image to the range (0, 1),
|
||||
by subtracting the minimum and dividing by the maximum pixel value.
|
||||
range_ (tuple, optional): tuple (min, max) where min and max are numbers,
|
||||
then these numbers are used to normalize the image. By default, min and max
|
||||
are computed from the tensor.
|
||||
scale_each (bool, optional): If True, scale each image in the batch of
|
||||
images separately rather than the (min, max) over all images.
|
||||
pad_value (float, optional): Value for the padded pixels.
|
||||
Example:
|
||||
See this notebook `here <https://gist.github.com/anonymous/bf16430f7750c023141c562f3e9f2a91>`_
|
||||
"""
|
||||
import math
|
||||
|
||||
if not (
|
||||
torch.is_tensor(tensor)
|
||||
or (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))
|
||||
):
|
||||
raise TypeError(
|
||||
"tensor or list of tensors expected, got {}".format(type(tensor))
|
||||
)
|
||||
|
||||
# if list of tensors, convert to a 4D mini-batch Tensor
|
||||
if isinstance(tensor, list):
|
||||
tensor = torch.stack(tensor, dim=0)
|
||||
|
||||
if tensor.dim() == 2: # single image H x W
|
||||
tensor = tensor.view(1, tensor.size(0), tensor.size(1))
|
||||
if tensor.dim() == 3: # single image
|
||||
if tensor.size(0) == 1: # if single-channel, convert to 3-channel
|
||||
tensor = torch.cat((tensor, tensor, tensor), 0)
|
||||
tensor = tensor.view(1, tensor.size(0), tensor.size(1), tensor.size(2))
|
||||
|
||||
if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images
|
||||
tensor = torch.cat((tensor, tensor, tensor), 1)
|
||||
|
||||
if normalize is True:
|
||||
tensor = tensor.clone() # avoid modifying tensor in-place
|
||||
if range_ is not None:
|
||||
assert isinstance(
|
||||
range_, tuple
|
||||
), "range has to be a tuple (min, max) if specified. min and max are numbers"
|
||||
|
||||
def norm_ip(img, min, max):
|
||||
img.clamp_(min=min, max=max)
|
||||
img.add_(-min).div_(max - min + 1e-5)
|
||||
|
||||
def norm_range(t, range_):
|
||||
if range_ is not None:
|
||||
norm_ip(t, range_[0], range_[1])
|
||||
else:
|
||||
norm_ip(t, float(t.min()), float(t.max()))
|
||||
|
||||
if scale_each is True:
|
||||
for t in tensor: # loop over mini-batch dimension
|
||||
norm_range(t, range_)
|
||||
else:
|
||||
norm_range(tensor, range_)
|
||||
|
||||
if tensor.size(0) == 1:
|
||||
return tensor.squeeze()
|
||||
|
||||
# make the mini-batch of images into a grid
|
||||
nmaps = tensor.size(0)
|
||||
xmaps = min(nrow, nmaps)
|
||||
ymaps = int(math.ceil(float(nmaps) / xmaps))
|
||||
height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding)
|
||||
grid = tensor.new(3, height * ymaps + padding, width * xmaps + padding).fill_(
|
||||
pad_value
|
||||
)
|
||||
k = 0
|
||||
for y in range(ymaps):
|
||||
for x in range(xmaps):
|
||||
if k >= nmaps:
|
||||
break
|
||||
grid.narrow(1, y * height + padding, height - padding).narrow(
|
||||
2, x * width + padding, width - padding
|
||||
).copy_(tensor[k])
|
||||
k = k + 1
|
||||
return grid
|
||||
|
||||
@staticmethod
|
||||
def get_image_grid(tensor, filename, nrow=3, padding=2, mean=None, std=None):
|
||||
"""
|
||||
Saves a given Tensor into an image file.
|
||||
If given a mini-batch tensor, will save the tensor as a grid of images.
|
||||
"""
|
||||
from PIL import Image
|
||||
|
||||
# tensor = tensor.cpu()
|
||||
grid = ObjectDetector.make_grid(tensor, nrow=nrow, padding=10, pad_value=1)
|
||||
if mean is not None:
|
||||
# ndarr = grid.mul(std).add(mean).mul(255).byte().transpose(0,2).transpose(0,1).numpy()
|
||||
ndarr = (
|
||||
grid.mul(std)
|
||||
.add(mean)
|
||||
.mul(255)
|
||||
.byte()
|
||||
.transpose(0, 2)
|
||||
.transpose(0, 1)
|
||||
.numpy()
|
||||
)
|
||||
else:
|
||||
ndarr = (
|
||||
grid.mul(0.5)
|
||||
.add(0.5)
|
||||
.mul(255)
|
||||
.byte()
|
||||
.transpose(0, 2)
|
||||
.transpose(0, 1)
|
||||
.numpy()
|
||||
)
|
||||
im = Image.fromarray(ndarr)
|
||||
# im.save(filename)
|
||||
return im
|
||||
|
||||
@staticmethod
|
||||
def detect_object_in_image(
|
||||
net_model, pnp_solver, in_img, config, grid_belief_debug=False, norm_belief=True
|
||||
):
|
||||
"""Detect objects in a image using a specific trained network model
|
||||
Returns the poses of the objects and the belief maps
|
||||
"""
|
||||
|
||||
if in_img is None:
|
||||
return []
|
||||
|
||||
# print("detect_object_in_image - image shape: {}".format(in_img.shape))
|
||||
|
||||
# Run network inference
|
||||
image_tensor = transform(in_img)
|
||||
image_torch = Variable(image_tensor).cuda().unsqueeze(0)
|
||||
out, seg = net_model(
|
||||
image_torch
|
||||
) # run inference using the network (calls 'forward' method)
|
||||
vertex2 = out[-1][0]
|
||||
aff = seg[-1][0]
|
||||
|
||||
# Find objects from network output
|
||||
detected_objects = ObjectDetector.find_object_poses(
|
||||
vertex2, aff, pnp_solver, config
|
||||
)
|
||||
|
||||
if not grid_belief_debug:
|
||||
|
||||
return detected_objects, None
|
||||
else:
|
||||
# Run the belief maps debug display on the beliefmaps
|
||||
|
||||
upsampling = nn.UpsamplingNearest2d(scale_factor=8)
|
||||
tensor = vertex2
|
||||
belief_imgs = []
|
||||
in_img = torch.tensor(in_img).float() / 255.0
|
||||
in_img *= 0.7
|
||||
|
||||
for j in range(tensor.size()[0]):
|
||||
belief = tensor[j].clone()
|
||||
if norm_belief:
|
||||
belief -= float(torch.min(belief)[0].data.cpu().numpy())
|
||||
belief /= float(torch.max(belief)[0].data.cpu().numpy())
|
||||
|
||||
belief = (
|
||||
upsampling(belief.unsqueeze(0).unsqueeze(0))
|
||||
.squeeze()
|
||||
.squeeze()
|
||||
.data
|
||||
)
|
||||
belief = torch.clamp(belief, 0, 1).cpu()
|
||||
belief = torch.cat(
|
||||
[
|
||||
belief.unsqueeze(0) + in_img[:, :, 0],
|
||||
belief.unsqueeze(0) + in_img[:, :, 1],
|
||||
belief.unsqueeze(0) + in_img[:, :, 2],
|
||||
]
|
||||
).unsqueeze(0)
|
||||
belief = torch.clamp(belief, 0, 1)
|
||||
|
||||
# belief_imgs.append(belief.data.squeeze().cpu().numpy().transpose(1,2,0))
|
||||
belief_imgs.append(belief.data.squeeze().numpy())
|
||||
|
||||
# Create the image grid
|
||||
belief_imgs = torch.tensor(np.array(belief_imgs))
|
||||
|
||||
im_belief = ObjectDetector.get_image_grid(belief_imgs, None, mean=0, std=1)
|
||||
|
||||
return detected_objects, im_belief
|
||||
|
||||
@staticmethod
|
||||
def find_object_poses(
|
||||
vertex2,
|
||||
aff,
|
||||
pnp_solver,
|
||||
config,
|
||||
run_sampling=False,
|
||||
num_sample=100,
|
||||
scale_factor=8,
|
||||
):
|
||||
"""Detect objects given network output"""
|
||||
|
||||
# run_sampling = True
|
||||
|
||||
# Detect objects from belief maps and affinities
|
||||
objects, all_peaks = ObjectDetector.find_objects(
|
||||
vertex2,
|
||||
aff,
|
||||
config,
|
||||
run_sampling=run_sampling,
|
||||
num_sample=num_sample,
|
||||
scale_factor=scale_factor,
|
||||
)
|
||||
detected_objects = []
|
||||
obj_name = pnp_solver.object_name
|
||||
|
||||
print(all_peaks)
|
||||
|
||||
# print("find_object_poses: found {} objects ================".format(len(objects)))
|
||||
for obj in objects:
|
||||
# Run PNP
|
||||
points = obj[1] + [(obj[0][0] * scale_factor, obj[0][1] * scale_factor)]
|
||||
print(points)
|
||||
cuboid2d = np.copy(points)
|
||||
location, quaternion, projected_points = pnp_solver.solve_pnp(points)
|
||||
|
||||
# run multiple sample
|
||||
if run_sampling:
|
||||
lx, ly, lz = [], [], []
|
||||
qx, qy, qz, qw = [], [], [], []
|
||||
|
||||
for i_sample in range(num_sample):
|
||||
sample = []
|
||||
for i_point in range(len(obj[-1])):
|
||||
if obj[-1][i_point][i_sample] is not None:
|
||||
sample.append(
|
||||
(
|
||||
obj[-1][i_point][i_sample][0] * scale_factor,
|
||||
obj[-1][i_point][i_sample][1] * scale_factor,
|
||||
)
|
||||
)
|
||||
else:
|
||||
sample.append(None)
|
||||
# final_cuboids.append(sample)
|
||||
pnp_sample = pnp_solver.solve_pnp(sample)
|
||||
|
||||
try:
|
||||
lx.append(pnp_sample[0][0])
|
||||
ly.append(pnp_sample[0][1])
|
||||
lz.append(pnp_sample[0][2])
|
||||
|
||||
qx.append(pnp_sample[1][0])
|
||||
qy.append(pnp_sample[1][1])
|
||||
qz.append(pnp_sample[1][2])
|
||||
qw.append(pnp_sample[1][3])
|
||||
except:
|
||||
pass
|
||||
# TODO
|
||||
# RUN quaternion as well for the std and avg.
|
||||
|
||||
try:
|
||||
print("----")
|
||||
print("location:")
|
||||
print(location[0], location[1], location[2])
|
||||
print(np.mean(lx), np.mean(ly), np.mean(lz))
|
||||
print(np.std(lx), np.std(ly), np.std(lz))
|
||||
print("quaternion:")
|
||||
print(quaternion[0], quaternion[1], quaternion[2], quaternion[3])
|
||||
print(np.mean(qx), np.mean(qy), np.mean(qz), np.mean(qw))
|
||||
print(np.std(qx), np.std(qy), np.std(qz), np.std(qw))
|
||||
|
||||
except:
|
||||
pass
|
||||
if location is not None:
|
||||
detected_objects.append(
|
||||
{
|
||||
"name": obj_name,
|
||||
"location": location,
|
||||
"quaternion": quaternion,
|
||||
"cuboid2d": cuboid2d,
|
||||
"projected_points": projected_points,
|
||||
"confidence": obj[-1],
|
||||
"raw_points": points,
|
||||
}
|
||||
)
|
||||
|
||||
# print("find_object_poses: points = ", type(points), points)
|
||||
# print("find_object_poses: locn = ", location, "quat =", quaternion)
|
||||
# print("find_object_poses: projected_points = ", type(projected_points), projected_points)
|
||||
|
||||
return detected_objects
|
||||
|
||||
@staticmethod
|
||||
def find_objects(
|
||||
vertex2,
|
||||
aff,
|
||||
config,
|
||||
numvertex=8,
|
||||
run_sampling=False,
|
||||
num_sample=100,
|
||||
scale_factor=8,
|
||||
):
|
||||
"""Detects objects given network belief maps and affinities, using heuristic method"""
|
||||
|
||||
all_peaks = []
|
||||
all_samples = []
|
||||
|
||||
peak_counter = 0
|
||||
for j in range(vertex2.size()[0]):
|
||||
belief = vertex2[j].clone()
|
||||
map_ori = belief.cpu().data.numpy()
|
||||
|
||||
map = gaussian_filter(belief.cpu().data.numpy(), sigma=config.sigma)
|
||||
p = 1
|
||||
map_left = np.zeros(map.shape)
|
||||
map_left[p:, :] = map[:-p, :]
|
||||
map_right = np.zeros(map.shape)
|
||||
map_right[:-p, :] = map[p:, :]
|
||||
map_up = np.zeros(map.shape)
|
||||
map_up[:, p:] = map[:, :-p]
|
||||
map_down = np.zeros(map.shape)
|
||||
map_down[:, :-p] = map[:, p:]
|
||||
|
||||
peaks_binary = np.logical_and.reduce(
|
||||
(
|
||||
map >= map_left,
|
||||
map >= map_right,
|
||||
map >= map_up,
|
||||
map >= map_down,
|
||||
map > config.thresh_map,
|
||||
)
|
||||
)
|
||||
peaks = zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])
|
||||
|
||||
# Compute the weighted average for localizing the peaks
|
||||
peaks = list(peaks)
|
||||
win = 11
|
||||
ran = win // 2
|
||||
peaks_avg = []
|
||||
point_sample_list = []
|
||||
for p_value in range(len(peaks)):
|
||||
p = peaks[p_value]
|
||||
weights = np.zeros((win, win))
|
||||
i_values = np.zeros((win, win))
|
||||
j_values = np.zeros((win, win))
|
||||
for i in range(-ran, ran + 1):
|
||||
for j in range(-ran, ran + 1):
|
||||
if (
|
||||
p[1] + i < 0
|
||||
or p[1] + i >= map_ori.shape[0]
|
||||
or p[0] + j < 0
|
||||
or p[0] + j >= map_ori.shape[1]
|
||||
):
|
||||
continue
|
||||
|
||||
i_values[j + ran, i + ran] = p[1] + i
|
||||
j_values[j + ran, i + ran] = p[0] + j
|
||||
|
||||
weights[j + ran, i + ran] = map_ori[p[1] + i, p[0] + j]
|
||||
# if the weights are all zeros
|
||||
# then fall back to the raw peak location in the except branch below
|
||||
OFFSET_DUE_TO_UPSAMPLING = 0.4395
|
||||
|
||||
# Sample the points using the gaussian
|
||||
if run_sampling:
|
||||
data = weights
|
||||
params = ObjectDetector.fitgaussian(data)
|
||||
fit = ObjectDetector.gaussian(*params)
|
||||
_, mu_x, mu_y, std_x, std_y = params
|
||||
points_sample = np.random.multivariate_normal(
|
||||
np.array(
|
||||
[
|
||||
p[1] + mu_x + OFFSET_DUE_TO_UPSAMPLING,
|
||||
p[0] - mu_y + OFFSET_DUE_TO_UPSAMPLING,
|
||||
]
|
||||
),
|
||||
# np.array([[std_x*std_x,0],[0,std_y*std_y]]), size=num_sample)
|
||||
np.array([[std_x, 0], [0, std_y]]),
|
||||
size=num_sample,
|
||||
)
|
||||
point_sample_list.append(points_sample)
|
||||
|
||||
try:
|
||||
peaks_avg.append(
|
||||
(
|
||||
np.average(j_values, weights=weights)
|
||||
+ OFFSET_DUE_TO_UPSAMPLING,
|
||||
np.average(i_values, weights=weights)
|
||||
+ OFFSET_DUE_TO_UPSAMPLING,
|
||||
)
|
||||
)
|
||||
except:
|
||||
peaks_avg.append(
|
||||
(
|
||||
p[0] + OFFSET_DUE_TO_UPSAMPLING,
|
||||
p[1] + OFFSET_DUE_TO_UPSAMPLING,
|
||||
)
|
||||
)
|
||||
|
||||
# Note: Python3 doesn't support len for zip object
|
||||
peaks_len = min(
|
||||
len(np.nonzero(peaks_binary)[1]), len(np.nonzero(peaks_binary)[0])
|
||||
)
|
||||
|
||||
peaks_with_score = [
|
||||
peaks_avg[x_] + (map_ori[peaks[x_][1], peaks[x_][0]],)
|
||||
for x_ in range(len(peaks))
|
||||
]
|
||||
|
||||
id = range(peak_counter, peak_counter + peaks_len)
|
||||
|
||||
peaks_with_score_and_id = [
|
||||
peaks_with_score[i] + (id[i],) for i in range(len(id))
|
||||
]
|
||||
|
||||
all_peaks.append(peaks_with_score_and_id)
|
||||
all_samples.append(point_sample_list)
|
||||
peak_counter += peaks_len
|
||||
|
||||
objects = []
|
||||
|
||||
if aff is None:
|
||||
# Assume there is only one object
|
||||
points = [None for i in range(numvertex)]
|
||||
for i_peak, peaks in enumerate(all_peaks):
|
||||
# print (peaks)
|
||||
for peak in peaks:
|
||||
if peak[2] > config.threshold:
|
||||
points[i_peak] = (peak[0], peak[1])
|
||||
|
||||
return points
|
||||
|
||||
# Check object centroid and build the objects if the centroid is found
|
||||
for nb_object in range(len(all_peaks[-1])):
|
||||
if all_peaks[-1][nb_object][2] > config.thresh_points:
|
||||
objects.append(
|
||||
[
|
||||
[
|
||||
all_peaks[-1][nb_object][:2][0],
|
||||
all_peaks[-1][nb_object][:2][1],
|
||||
],
|
||||
[None for i in range(numvertex)],
|
||||
[None for i in range(numvertex)],
|
||||
all_peaks[-1][nb_object][2],
|
||||
[
|
||||
[None for j in range(num_sample)]
|
||||
for i in range(numvertex + 1)
|
||||
],
|
||||
]
|
||||
)
|
||||
|
||||
# Check if the object was added before
|
||||
if run_sampling and nb_object < len(objects):
|
||||
# add the samples to the object centroids
|
||||
objects[nb_object][4][-1] = all_samples[-1][nb_object]
|
||||
|
||||
# Working with an output that only has belief maps
|
||||
if aff is None:
|
||||
if len(objects) > 0 and len(all_peaks) > 0 and len(all_peaks[0]) > 0:
|
||||
for i_points in range(8):
|
||||
if (
|
||||
len(all_peaks[i_points]) > 0
|
||||
and all_peaks[i_points][0][2] > config.threshold
|
||||
):
|
||||
objects[0][1][i_points] = (
|
||||
all_peaks[i_points][0][0],
|
||||
all_peaks[i_points][0][1],
|
||||
)
|
||||
else:
|
||||
# For all points found
|
||||
for i_lists in range(len(all_peaks[:-1])):
|
||||
lists = all_peaks[i_lists]
|
||||
|
||||
# A candidate is a point that needs to be matched with an object centroid
|
||||
for i_candidate, candidate in enumerate(lists):
|
||||
if candidate[2] < config.thresh_points:
|
||||
continue
|
||||
|
||||
i_best = -1
|
||||
best_dist = 10000
|
||||
best_angle = 100
|
||||
|
||||
# Find the points that links to that centroid.
|
||||
for i_obj in range(len(objects)):
|
||||
center = [objects[i_obj][0][0], objects[i_obj][0][1]]
|
||||
|
||||
# integer is used to look into the affinity map,
|
||||
# but the float version is used for the distance computation
|
||||
point_int = [int(candidate[0]), int(candidate[1])]
|
||||
point = [candidate[0], candidate[1]]
|
||||
|
||||
# look at the distance to the vector field.
|
||||
v_aff = (
|
||||
np.array(
|
||||
[
|
||||
aff[
|
||||
i_lists * 2, point_int[1], point_int[0]
|
||||
].data.item(),
|
||||
aff[
|
||||
i_lists * 2 + 1, point_int[1], point_int[0]
|
||||
].data.item(),
|
||||
]
|
||||
)
|
||||
* 10
|
||||
)
|
||||
|
||||
# normalize the vector
|
||||
xvec = v_aff[0]
|
||||
yvec = v_aff[1]
|
||||
|
||||
norms = np.sqrt(xvec * xvec + yvec * yvec)
|
||||
|
||||
xvec /= norms
|
||||
yvec /= norms
|
||||
|
||||
v_aff = np.concatenate([[xvec], [yvec]])
|
||||
|
||||
v_center = np.array(center) - np.array(point)
|
||||
xvec = v_center[0]
|
||||
yvec = v_center[1]
|
||||
|
||||
norms = np.sqrt(xvec * xvec + yvec * yvec)
|
||||
|
||||
xvec /= norms
|
||||
yvec /= norms
|
||||
|
||||
v_center = np.concatenate([[xvec], [yvec]])
|
||||
|
||||
# vector affinity
|
||||
dist_angle = np.linalg.norm(v_center - v_aff)
|
||||
|
||||
# distance between vertexes
|
||||
dist_point = np.linalg.norm(np.array(point) - np.array(center))
|
||||
|
||||
if (
|
||||
dist_angle < config.thresh_angle
|
||||
and best_dist > 1000
|
||||
or dist_angle < config.thresh_angle
|
||||
and best_dist > dist_point
|
||||
):
|
||||
i_best = i_obj
|
||||
best_angle = dist_angle
|
||||
best_dist = dist_point
|
||||
|
||||
if i_best == -1:
|
||||
continue
|
||||
|
||||
if (
|
||||
objects[i_best][1][i_lists] is None
|
||||
or best_angle < config.thresh_angle
|
||||
and best_dist < objects[i_best][2][i_lists][1]
|
||||
):
|
||||
# set the points
|
||||
objects[i_best][1][i_lists] = (
|
||||
(candidate[0]) * scale_factor,
|
||||
(candidate[1]) * scale_factor,
|
||||
)
|
||||
# set information about the points: angle and distance
|
||||
objects[i_best][2][i_lists] = (best_angle, best_dist)
|
||||
# add the sample points
|
||||
if run_sampling:
|
||||
objects[i_best][4][i_lists] = all_samples[i_lists][
|
||||
i_candidate
|
||||
]
|
||||
return objects, all_peaks
|
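To make the peak heuristic above easier to follow in isolation, here is a self-contained sketch (not part of the diff, and without the sampling and affinity-matching steps) of the same idea: smooth one belief map, keep local maxima above a threshold, then refine each peak with a weighted average over a small window. The wrap-around neighbour check via np.roll is a simplification of the zero-padded shifts used above.

import numpy as np
from scipy.ndimage import gaussian_filter

def extract_peaks(belief_map, sigma=3.0, thresh=0.01, win=11):
    """Find local maxima of one belief map and refine them with a weighted average."""
    smoothed = gaussian_filter(belief_map, sigma=sigma)
    # a pixel is kept if it is >= its four neighbours and above the threshold
    peaks_mask = (
        (smoothed >= np.roll(smoothed, 1, axis=0))
        & (smoothed >= np.roll(smoothed, -1, axis=0))
        & (smoothed >= np.roll(smoothed, 1, axis=1))
        & (smoothed >= np.roll(smoothed, -1, axis=1))
        & (smoothed > thresh)
    )
    ran = win // 2
    refined = []
    for y, x in zip(*np.nonzero(peaks_mask)):
        # weighted average over a (win x win) window, clipped at the image border
        y0, y1 = max(y - ran, 0), min(y + ran + 1, belief_map.shape[0])
        x0, x1 = max(x - ran, 0), min(x + ran + 1, belief_map.shape[1])
        window = belief_map[y0:y1, x0:x1]
        ys, xs = np.mgrid[y0:y1, x0:x1]
        if window.sum() > 0:
            refined.append(((xs * window).sum() / window.sum(),
                            (ys * window).sum() / window.sum()))
        else:
            refined.append((float(x), float(y)))
    return refined  # list of (x, y) peak coordinates in belief-map resolution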
196
rbs_perception/scripts/models.py
Executable file
@ -0,0 +1,196 @@
|
|||
"""
|
||||
NVIDIA from jtremblay@gmail.com
|
||||
"""
|
||||
|
||||
# Networks
|
||||
import torch
|
||||
import torch.nn as nn
|
||||
import torch.nn.parallel
|
||||
import torch.utils.data
|
||||
import torchvision.models as models
|
||||
|
||||
|
||||
class DopeNetwork(nn.Module):
|
||||
def __init__(
|
||||
self,
|
||||
pretrained=False,
|
||||
numBeliefMap=9,
|
||||
numAffinity=16,
|
||||
stop_at_stage=6, # number of stages to process (if less than total number of stages)
|
||||
):
|
||||
super(DopeNetwork, self).__init__()
|
||||
|
||||
self.stop_at_stage = stop_at_stage
|
||||
|
||||
vgg_full = models.vgg19(pretrained=False).features
|
||||
self.vgg = nn.Sequential()
|
||||
for i_layer in range(24):
|
||||
self.vgg.add_module(str(i_layer), vgg_full[i_layer])
|
||||
|
||||
# Add some layers
|
||||
i_layer = 23
|
||||
self.vgg.add_module(
|
||||
str(i_layer), nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1)
|
||||
)
|
||||
self.vgg.add_module(str(i_layer + 1), nn.ReLU(inplace=True))
|
||||
self.vgg.add_module(
|
||||
str(i_layer + 2), nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1)
|
||||
)
|
||||
self.vgg.add_module(str(i_layer + 3), nn.ReLU(inplace=True))
|
||||
|
||||
# print('---Belief------------------------------------------------')
|
||||
# _2 are the belief map stages
|
||||
self.m1_2 = DopeNetwork.create_stage(128, numBeliefMap, True)
|
||||
self.m2_2 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numBeliefMap, False
|
||||
)
|
||||
self.m3_2 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numBeliefMap, False
|
||||
)
|
||||
self.m4_2 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numBeliefMap, False
|
||||
)
|
||||
self.m5_2 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numBeliefMap, False
|
||||
)
|
||||
self.m6_2 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numBeliefMap, False
|
||||
)
|
||||
|
||||
# print('---Affinity----------------------------------------------')
|
||||
# _1 are the affinity map stages
|
||||
self.m1_1 = DopeNetwork.create_stage(128, numAffinity, True)
|
||||
self.m2_1 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numAffinity, False
|
||||
)
|
||||
self.m3_1 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numAffinity, False
|
||||
)
|
||||
self.m4_1 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numAffinity, False
|
||||
)
|
||||
self.m5_1 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numAffinity, False
|
||||
)
|
||||
self.m6_1 = DopeNetwork.create_stage(
|
||||
128 + numBeliefMap + numAffinity, numAffinity, False
|
||||
)
|
||||
|
||||
def forward(self, x):
|
||||
"""Runs inference on the neural network"""
|
||||
|
||||
out1 = self.vgg(x)
|
||||
|
||||
out1_2 = self.m1_2(out1)
|
||||
out1_1 = self.m1_1(out1)
|
||||
|
||||
if self.stop_at_stage == 1:
|
||||
return [out1_2], [out1_1]
|
||||
|
||||
out2 = torch.cat([out1_2, out1_1, out1], 1)
|
||||
out2_2 = self.m2_2(out2)
|
||||
out2_1 = self.m2_1(out2)
|
||||
|
||||
if self.stop_at_stage == 2:
|
||||
return [out1_2, out2_2], [out1_1, out2_1]
|
||||
|
||||
out3 = torch.cat([out2_2, out2_1, out1], 1)
|
||||
out3_2 = self.m3_2(out3)
|
||||
out3_1 = self.m3_1(out3)
|
||||
|
||||
if self.stop_at_stage == 3:
|
||||
return [out1_2, out2_2, out3_2], [out1_1, out2_1, out3_1]
|
||||
|
||||
out4 = torch.cat([out3_2, out3_1, out1], 1)
|
||||
out4_2 = self.m4_2(out4)
|
||||
out4_1 = self.m4_1(out4)
|
||||
|
||||
if self.stop_at_stage == 4:
|
||||
return [out1_2, out2_2, out3_2, out4_2], [out1_1, out2_1, out3_1, out4_1]
|
||||
|
||||
out5 = torch.cat([out4_2, out4_1, out1], 1)
|
||||
out5_2 = self.m5_2(out5)
|
||||
out5_1 = self.m5_1(out5)
|
||||
|
||||
if self.stop_at_stage == 5:
|
||||
return [out1_2, out2_2, out3_2, out4_2, out5_2], [
|
||||
out1_1,
|
||||
out2_1,
|
||||
out3_1,
|
||||
out4_1,
|
||||
out5_1,
|
||||
]
|
||||
|
||||
out6 = torch.cat([out5_2, out5_1, out1], 1)
|
||||
out6_2 = self.m6_2(out6)
|
||||
out6_1 = self.m6_1(out6)
|
||||
|
||||
return [out1_2, out2_2, out3_2, out4_2, out5_2, out6_2], [
|
||||
out1_1,
|
||||
out2_1,
|
||||
out3_1,
|
||||
out4_1,
|
||||
out5_1,
|
||||
out6_1,
|
||||
]
|
||||
|
||||
@staticmethod
|
||||
def create_stage(in_channels, out_channels, first=False):
|
||||
"""Create the neural network layers for a single stage."""
|
||||
|
||||
model = nn.Sequential()
|
||||
mid_channels = 128
|
||||
if first:
|
||||
padding = 1
|
||||
kernel = 3
|
||||
count = 6
|
||||
final_channels = 512
|
||||
else:
|
||||
padding = 3
|
||||
kernel = 7
|
||||
count = 10
|
||||
final_channels = mid_channels
|
||||
|
||||
# First convolution
|
||||
model.add_module(
|
||||
"0",
|
||||
nn.Conv2d(
|
||||
in_channels, mid_channels, kernel_size=kernel, stride=1, padding=padding
|
||||
),
|
||||
)
|
||||
|
||||
# Middle convolutions
|
||||
i = 1
|
||||
while i < count - 1:
|
||||
model.add_module(str(i), nn.ReLU(inplace=True))
|
||||
i += 1
|
||||
model.add_module(
|
||||
str(i),
|
||||
nn.Conv2d(
|
||||
mid_channels,
|
||||
mid_channels,
|
||||
kernel_size=kernel,
|
||||
stride=1,
|
||||
padding=padding,
|
||||
),
|
||||
)
|
||||
i += 1
|
||||
|
||||
# Penultimate convolution
|
||||
model.add_module(str(i), nn.ReLU(inplace=True))
|
||||
i += 1
|
||||
model.add_module(
|
||||
str(i), nn.Conv2d(mid_channels, final_channels, kernel_size=1, stride=1)
|
||||
)
|
||||
i += 1
|
||||
|
||||
# Last convolution
|
||||
model.add_module(str(i), nn.ReLU(inplace=True))
|
||||
i += 1
|
||||
model.add_module(
|
||||
str(i), nn.Conv2d(final_channels, out_channels, kernel_size=1, stride=1)
|
||||
)
|
||||
i += 1
|
||||
|
||||
return model
|
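As a quick sanity check (not part of the diff): the belief and affinity heads defined above downsample the input by a factor of 8, which is where scale_factor=8 in detector.py comes from. The shapes below assume the default 9 belief maps and 16 affinity maps:

import torch
from models import DopeNetwork

net = DopeNetwork()
with torch.no_grad():
    beliefs, affinities = net(torch.zeros(1, 3, 400, 400))
print(len(beliefs), beliefs[-1].shape)        # 6 stages, torch.Size([1, 9, 50, 50])
print(len(affinities), affinities[-1].shape)  # 6 stages, torch.Size([1, 16, 50, 50])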
411
rbs_perception/scripts/pe_dope_lc.py
Executable file
@ -0,0 +1,411 @@
|
|||
#!/usr/bin/env python3
|
||||
"""
|
||||
pose_estimation_lifecycle_node_with_DOPE
|
||||
ROS 2 program for 6D Pose Estimation
|
||||
|
||||
Source run:
|
||||
python inference.py --weights ../output/weights_2996 --data ../sample_dataset100 --object fork --exts jpg \
|
||||
--config config/config_pose_fork.yaml --camera config/camera_info_640x480.yaml
|
||||
|
||||
@shalenikol release 0.3
|
||||
"""
|
||||
import os
|
||||
import json
|
||||
import yaml
|
||||
|
||||
import rclpy
|
||||
from rclpy.lifecycle import Node
|
||||
from rclpy.lifecycle import State
|
||||
from rclpy.lifecycle import TransitionCallbackReturn
|
||||
|
||||
from ament_index_python.packages import get_package_share_directory
|
||||
from sensor_msgs.msg import Image, CameraInfo
|
||||
from geometry_msgs.msg import Pose
|
||||
# from tf2_ros import TransformException
|
||||
from tf2_ros.buffer import Buffer
|
||||
|
||||
from cv_bridge import CvBridge # Package to convert between ROS and OpenCV Images
|
||||
import cv2 # OpenCV library
|
||||
|
||||
FILE_SKILL_CONFIG = "PoseEstimation.json" # используется при запуске ноды и при запуске BT с этим навыком
|
||||
FILE_DOPE_CONFIG = "pe_dope_config.yaml"
|
||||
# FILE_TEMP_IMAGE = "image_rgb.png"
|
||||
PARAM_NAME = "str_param"
|
||||
PARAM_SKILL_CONFIG = "skill_cfg"
|
||||
CAMERA_LINK_DEFAULT = "outer_rgbd_camera"
|
||||
# NODE_NAME_DEFAULT = "lc_dope"
|
||||
|
||||
def get_transfer_path_() -> str:
|
||||
return os.path.join(get_package_share_directory("rbs_perception"), "config")
|
||||
|
||||
class PE_DOPE(Node):
|
||||
"""Pose estimation lifecycle node with DOPE."""
|
||||
def __init__(self, node_name="", **kwargs):
|
||||
"""Construct the node."""
|
||||
self.skill_cfg = self._load_config()
|
||||
ros_cfg = self.skill_cfg["ROS2"]
|
||||
self.nodeName = ros_cfg["node_name"] #node_name
|
||||
out_par = self.skill_cfg["Interface"]["Output"][0]
|
||||
self.topicSrv = self.nodeName + "/" + out_par["name"]
|
||||
# for other nodes
|
||||
kwargs["allow_undeclared_parameters"] = True
|
||||
kwargs["automatically_declare_parameters_from_overrides"] = True
|
||||
super().__init__(self.nodeName, **kwargs)
|
||||
self.declare_parameter(PARAM_NAME, rclpy.Parameter.Type.STRING)
|
||||
self.declare_parameter(PARAM_SKILL_CONFIG, rclpy.Parameter.Type.STRING)
|
||||
# Used to convert between ROS and OpenCV images
|
||||
self.br = CvBridge()
|
||||
self.dope_cfg = self._load_config_DOPE()
|
||||
|
||||
self._cam_pose = Pose()
|
||||
self.tf_buffer = Buffer()
|
||||
|
||||
self._is_camerainfo = False
|
||||
self.topicImage = ""
|
||||
self.topicCameraInfo = ""
|
||||
self.camera_link = ""
|
||||
self._set_camera_topic(CAMERA_LINK_DEFAULT)
|
||||
self._sub = None
|
||||
self._sub_info = None
|
||||
self._pub = None
|
||||
self._image_cnt = 0
|
||||
self._K = []
|
||||
|
||||
def _set_camera_topic(self, camera_link: str):
|
||||
""" Service for camera name topics """
|
||||
self.topicImage = "/" + camera_link + "/image"
|
||||
self.topicCameraInfo = "/" + camera_link +"/camera_info"
|
||||
self.camera_link = camera_link
|
||||
|
||||
def listener_camera_info(self, data):
|
||||
""" CameraInfo callback function. """
|
||||
if self._is_camerainfo: # don’t read camera info again
|
||||
return
|
||||
|
||||
self._res = [data.height, data.width]
|
||||
k_ = data.k
|
||||
self._K = [
|
||||
[k_[0], k_[1], k_[2]],
|
||||
[k_[3], k_[4], k_[5]],
|
||||
[k_[6], k_[7], k_[8]]
|
||||
]
|
||||
# set the indicator for receiving the camera info
|
||||
self._is_camerainfo = True
|
||||
|
||||
def _load_config(self):
|
||||
p = os.path.join(get_transfer_path_(), FILE_SKILL_CONFIG)
|
||||
with open(p, "r") as f:
|
||||
j = json.load(f)
|
||||
return j
|
||||
|
||||
def _load_config_DOPE(self):
|
||||
p = os.path.join(get_transfer_path_(), FILE_DOPE_CONFIG)
|
||||
with open(p, "r") as f:
|
||||
y = yaml.load(f, Loader=yaml.FullLoader)
|
||||
return y
|
||||
|
||||
def on_configure(self, state: State) -> TransitionCallbackReturn:
|
||||
"""
|
||||
Configure the node, after a configuring transition is requested.
|
||||
|
||||
return: The state machine either invokes a transition to the "inactive" state or stays
|
||||
in "unconfigured" depending on the return value.
|
||||
TransitionCallbackReturn.SUCCESS transitions to "inactive".
|
||||
TransitionCallbackReturn.FAILURE transitions to "unconfigured".
|
||||
TransitionCallbackReturn.ERROR or any uncaught exceptions to "errorprocessing"
|
||||
"""
|
||||
yaml_param = self.get_parameter(PARAM_NAME).get_parameter_value().string_value
|
||||
str_param = yaml.load(yaml_param, Loader=yaml.FullLoader)
|
||||
# !!! self.skill_cfg can be used instead of PARAM_SKILL_CONFIG !!! (they are the same thing)
|
||||
# yaml_param = self.get_parameter(PARAM_SKILL_CONFIG).get_parameter_value().string_value
|
||||
# pe_cfg = yaml.load(yaml_param, Loader=yaml.FullLoader)
|
||||
|
||||
# with open("pe.yaml", "w") as f:
|
||||
# f.write(yaml_param)
|
||||
# print(skill_cfg)
|
||||
|
||||
# Create the subscribers.
|
||||
self._sub_info = self.create_subscription(CameraInfo, self.topicCameraInfo, self.listener_camera_info, 2)
|
||||
# Create the publisher.
|
||||
self._pub = self.create_lifecycle_publisher(Pose, self.topicSrv, 1)
|
||||
|
||||
# Load model weights
|
||||
w = str_param["weights_file"]
|
||||
if not os.path.isfile(w):
|
||||
self.get_logger().warning(f"No weights found <{w}>")
|
||||
return TransitionCallbackReturn.FAILURE
|
||||
|
||||
obj = str_param["object_name"]
|
||||
dim = str_param["dimensions"]
|
||||
self.dope_node = Dope(self.dope_cfg, w, obj, dim)
|
||||
|
||||
self.get_logger().info(f"'{self.nodeName}' configure is success (with '{obj}')")
|
||||
return TransitionCallbackReturn.SUCCESS
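# For reference, a minimal sketch (illustration only) of the payload expected in
# the 'str_param' parameter when on_configure() runs; the keys are taken from the
# code above, the concrete values are placeholders.
import yaml
str_param_example = yaml.dump({
    "weights_file": "/path/to/dope_weights.pth",  # placeholder path to trained DOPE weights
    "object_name": "fork",
    "dimensions": [13.7, 16.5, 20.2],             # cuboid size passed to Cuboid3d
})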
|
||||
|
||||
def on_activate(self, state: State) -> TransitionCallbackReturn:
|
||||
self.get_logger().info("on_activate is called")
|
||||
# Create the Image subscriber.
|
||||
self._sub = self.create_subscription(Image, self.topicImage, self.image_callback, 3)
|
||||
|
||||
# # !!! test
|
||||
# self._timer = self.create_timer(5, self.test_im_proc)
|
||||
|
||||
return super().on_activate(state)
|
||||
|
||||
def on_deactivate(self, state: State) -> TransitionCallbackReturn:
|
||||
self.get_logger().info("on_deactivate is called")
|
||||
|
||||
# # !!! test
|
||||
# self.destroy_timer(self._timer)
|
||||
|
||||
# Destroy the Image subscriber.
|
||||
self.destroy_subscription(self._sub)
|
||||
return super().on_deactivate(state)
|
||||
|
||||
def on_cleanup(self, state: State) -> TransitionCallbackReturn:
|
||||
# clear the parameters
|
||||
# node_param = rclpy.parameter.Parameter("mesh_path", rclpy.Parameter.Type.STRING, "")
|
||||
# all_node_param = [node_param]
|
||||
# self.set_parameters(all_node_param)
|
||||
|
||||
self._is_camerainfo = False
|
||||
|
||||
self.destroy_publisher(self._pub)
|
||||
self.destroy_subscription(self._sub_info)
|
||||
|
||||
self.get_logger().info("on_cleanup is called")
|
||||
return TransitionCallbackReturn.SUCCESS
|
||||
|
||||
def on_shutdown(self, state: State) -> TransitionCallbackReturn:
|
||||
self.get_logger().info("on_shutdown is called")
|
||||
return TransitionCallbackReturn.SUCCESS
|
||||
|
||||
# def test_im_proc(self):
|
||||
# im = "im_tst.jpg"
|
||||
# if not os.path.isfile(im):
|
||||
# print(f"File not found '{im}'")
|
||||
# return
|
||||
# frame = cv2.imread(im)
|
||||
# frame = frame[..., ::-1].copy()
|
||||
# self._K = [[585.756089952257, 0.0, 319.5], [0.0, 585.756089952257, 239.5], [0.0, 0.0, 1.0]]
|
||||
# # call the inference node
|
||||
# p = self.dope_node.image_processing(img=frame, camera_info=self._K)
|
||||
# print(f"pose = {p}")
|
||||
|
||||
def image_callback(self, data):
|
||||
""" Image Callback function. """
|
||||
if not self._is_camerainfo:
|
||||
self.get_logger().warning("No data from CameraInfo")
|
||||
return
|
||||
# # get camera pose
|
||||
# camera_name = self.camera_link #self.topicImage.split('/')[1]
|
||||
# try:
|
||||
# t = self.tf_buffer.lookup_transform("world", camera_name, rclpy.time.Time())
|
||||
# except TransformException as ex:
|
||||
# self.get_logger().info(f"Could not transform {camera_name} to world: {ex}")
|
||||
# return
|
||||
# self._cam_pose.position.x = t.transform.translation.x
|
||||
# self._cam_pose.position.y = t.transform.translation.y
|
||||
# self._cam_pose.position.z = t.transform.translation.z
|
||||
# self._cam_pose.orientation.w = t.transform.rotation.w
|
||||
# self._cam_pose.orientation.x = t.transform.rotation.x
|
||||
# self._cam_pose.orientation.y = t.transform.rotation.y
|
||||
# self._cam_pose.orientation.z = t.transform.rotation.z
|
||||
|
||||
# Convert ROS Image message to OpenCV image
|
||||
current_frame = self.br.imgmsg_to_cv2(data)
|
||||
|
||||
# # Save image
|
||||
# frame_im = FILE_TEMP_IMAGE # str(self.objPath / "image_rgb.png")
|
||||
# cv2.imwrite(frame_im, current_frame)
|
||||
self._image_cnt += 1
|
||||
|
||||
self.get_logger().info(f"dope: begin {self._image_cnt}")
|
||||
current_frame = current_frame[..., ::-1].copy()
|
||||
pose = self.dope_node.image_processing(img=current_frame, camera_info=self._K)
|
||||
self.get_logger().info(f"dope: end {self._image_cnt}")
|
||||
if self._pub is not None and self._pub.is_activated:
|
||||
# publish pose estimation result
|
||||
self._pub.publish(pose)
|
||||
# if self.tf2_send_pose:
|
||||
# self.tf_obj_pose(t,q) #(self._pose)
|
||||
|
||||
from detector import ModelData, ObjectDetector
|
||||
from cuboid_pnp_solver import CuboidPNPSolver
|
||||
from cuboid import Cuboid3d
|
||||
import numpy as np
|
||||
# robossembler_ws/src/robossembler-ros2/rbs_perception/scripts/utils.py
|
||||
# from utils import Draw
|
||||
|
||||
class Dope(object):
|
||||
"""ROS node that listens to image topic, runs DOPE, and publishes DOPE results"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
config, # config yaml loaded eg dict
|
||||
weight, # path to weight
|
||||
class_name,
|
||||
dim: list # dimensions of model 'class_name'
|
||||
):
|
||||
self.input_is_rectified = config["input_is_rectified"]
|
||||
self.downscale_height = config["downscale_height"]
|
||||
|
||||
self.config_detect = lambda: None
|
||||
self.config_detect.mask_edges = 1
|
||||
self.config_detect.mask_faces = 1
|
||||
self.config_detect.vertex = 1
|
||||
self.config_detect.threshold = 0.5
|
||||
self.config_detect.softmax = 1000
|
||||
self.config_detect.thresh_angle = config["thresh_angle"]
|
||||
self.config_detect.thresh_map = config["thresh_map"]
|
||||
self.config_detect.sigma = config["sigma"]
|
||||
self.config_detect.thresh_points = config["thresh_points"]
|
||||
|
||||
# load network model, create PNP solver
|
||||
self.model = ModelData(name=class_name, net_path=weight)
|
||||
|
||||
# TODO warn on load_net_model():
|
||||
# Loading DOPE model '/home/shalenikol/robossembler_ws/fork_e47.pth'...
|
||||
# /home/shalenikol/.local/lib/python3.10/site-packages/torchvision/models/_utils.py:208:
|
||||
# UserWarning: The parameter 'pretrained' is deprecated since 0.13 and may be removed in the future, please use 'weights' instead.
|
||||
|
||||
# warnings.warn(
|
||||
# /home/shalenikol/.local/lib/python3.10/site-packages/torchvision/models/_utils.py:223:
|
||||
# UserWarning: Arguments other than a weight enum or `None` for 'weights' are deprecated since 0.13 and may be removed in the future.
|
||||
# The current behavior is equivalent to passing `weights=None`.
|
||||
# warnings.warn(msg)
|
||||
self.model.load_net_model()
|
||||
# print("Model Loaded")
|
||||
|
||||
# try:
|
||||
# self.draw_color = tuple(config["draw_colors"][class_name])
|
||||
# except:
|
||||
self.draw_color = (0, 255, 0)
|
||||
|
||||
# TODO load dim from config
|
||||
# dim = [13.7, 16.5, 20.2] # config["dimensions"][class_name]
|
||||
self.dimension = tuple(dim)
|
||||
self.class_id = 1 #config["class_ids"][class_name]
|
||||
|
||||
self.pnp_solver = CuboidPNPSolver(class_name, cuboid3d=Cuboid3d(dim))
|
||||
self.class_name = class_name
|
||||
# print("Ctrl-C to stop")
|
||||
|
||||
def image_processing(
|
||||
self,
|
||||
img,
|
||||
camera_info
|
||||
# img_name, # this is the name of the img file to save, it needs the .png at the end
|
||||
# output_folder, # folder where to put the output
|
||||
# weight
|
||||
) -> Pose:
|
||||
# !!! Always self.input_is_rectified = True
|
||||
camera_matrix = np.matrix(camera_info, dtype="float64").copy()
|
||||
dist_coeffs = np.zeros((4, 1))
|
||||
# Update camera matrix and distortion coefficients
|
||||
# if self.input_is_rectified:
|
||||
# P = np.matrix(
|
||||
# camera_info["projection_matrix"]["data"], dtype="float64"
|
||||
# ).copy()
|
||||
# P.resize((3, 4))
|
||||
# camera_matrix = P[:, :3]
|
||||
# dist_coeffs = np.zeros((4, 1))
|
||||
# else:
|
||||
# # TODO ???
|
||||
# camera_matrix = np.matrix(camera_info.K, dtype="float64")
|
||||
# camera_matrix.resize((3, 3))
|
||||
# dist_coeffs = np.matrix(camera_info.D, dtype="float64")
|
||||
# dist_coeffs.resize((len(camera_info.D), 1))
|
||||
|
||||
# Downscale image if necessary
|
||||
height, width, _ = img.shape
|
||||
scaling_factor = float(self.downscale_height) / height
|
||||
if scaling_factor < 1.0:
|
||||
camera_matrix[:2] *= scaling_factor
|
||||
img = cv2.resize(img, (int(scaling_factor * width), int(scaling_factor * height)))
|
||||
|
||||
self.pnp_solver.set_camera_intrinsic_matrix(camera_matrix)
|
||||
self.pnp_solver.set_dist_coeffs(dist_coeffs)
|
||||
|
||||
# # Copy and draw image
|
||||
# img_copy = img.copy()
|
||||
# im = Image.fromarray(img_copy)
|
||||
# draw = Draw(im)
|
||||
|
||||
# # dictionary for the final output
|
||||
# dict_out = {"camera_data": {}, "objects": []}
|
||||
|
||||
# Detect object
|
||||
results, _ = ObjectDetector.detect_object_in_image(
|
||||
self.model.net, self.pnp_solver, img, self.config_detect
|
||||
)
|
||||
|
||||
# Publish pose #and overlay cube on image
|
||||
p = Pose()
|
||||
for _, result in enumerate(results):
|
||||
if result["location"] is None:
|
||||
continue
|
||||
|
||||
l = result["location"]
|
||||
q = result["quaternion"]
|
||||
p.position.x = l[0]
|
||||
p.position.y = l[1]
|
||||
p.position.z = l[2]
|
||||
p.orientation.x = q[0]
|
||||
p.orientation.y = q[1]
|
||||
p.orientation.z = q[2]
|
||||
p.orientation.w = q[3]
|
||||
break # !!! only considering the first option for now
|
||||
return p
|
||||
# # save the json files
|
||||
# with open(f"tmp_result{i}.json", "w") as fp:
|
||||
# json.dump(result, fp, indent=2)
|
||||
|
||||
# dict_out["objects"].append(
|
||||
# {
|
||||
# "class": self.class_name,
|
||||
# "location": np.array(loc).tolist(),
|
||||
# "quaternion_xyzw": np.array(ori).tolist(),
|
||||
# "projected_cuboid": np.array(result["projected_points"]).tolist(),
|
||||
# }
|
||||
# )
|
||||
# # Draw the cube
|
||||
# if None not in result["projected_points"]:
|
||||
# points2d = []
|
||||
# for pair in result["projected_points"]:
|
||||
# points2d.append(tuple(pair))
|
||||
# draw.draw_cube(points2d, self.draw_color)
|
||||
|
||||
# # create directory to save image if it does not exist
|
||||
# img_name_base = img_name.split("/")[-1]
|
||||
# output_path = os.path.join(
|
||||
# output_folder,
|
||||
# weight.split("/")[-1].replace(".pth", ""),
|
||||
# *img_name.split("/")[:-1],
|
||||
# )
|
||||
# if not os.path.isdir(output_path):
|
||||
# os.makedirs(output_path, exist_ok=True)
|
||||
|
||||
# im.save(os.path.join(output_path, img_name_base))
|
||||
|
||||
# json_path = os.path.join(
|
||||
# output_path, ".".join(img_name_base.split(".")[:-1]) + ".json"
|
||||
# )
|
||||
# # save the json files
|
||||
# with open(json_path, "w") as fp:
|
||||
# json.dump(dict_out, fp, indent=2)
|
||||
|
||||
def main():
|
||||
rclpy.init()
|
||||
|
||||
executor = rclpy.executors.SingleThreadedExecutor()
|
||||
# executor = rclpy.executors.MultiThreadedExecutor()
|
||||
lc_node = PE_DOPE() #NODE_NAME_DEFAULT)
|
||||
executor.add_node(lc_node)
|
||||
try:
|
||||
executor.spin()
|
||||
except (KeyboardInterrupt, rclpy.executors.ExternalShutdownException):
|
||||
lc_node.destroy_node()
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
222
rbs_perception/scripts/rbs_interface.py
Executable file
@ -0,0 +1,222 @@
|
|||
#!/usr/bin/env python3
"""
rbsInterface_node
ROS 2 program for Robossembler

@shalenikol release 0.1
"""
import os
import json
import yaml

import rclpy
from rclpy.node import Node
# from rclpy.action import ActionServer, CancelResponse, GoalResponse
from rclpy.callback_groups import ReentrantCallbackGroup
from ament_index_python.packages import get_package_share_directory

from rclpy.parameter import Parameter
from rcl_interfaces.srv import SetParameters  # , GetParameters
# from rcl_interfaces.msg import Parameter, ParameterValue
# rcl_interfaces.msg.ParameterValue
from lifecycle_msgs.srv import ChangeState, GetState
from lifecycle_msgs.msg import Transition
# from lifecycle_msgs.msg import State
from rbs_skill_interfaces.srv import RbsBt
# from ros2node.api import get_node_names as get_all_node_names

PARAM_NAME = "str_param"
PARAM_SKILL_CONFIG = "skill_cfg"


def get_transfer_path_():
    return os.path.join(get_package_share_directory("rbs_perception"), "config")


class rbsInterface(Node):
    def __init__(self, node_name):
        """Construct the node."""
        self._i_exe = 0  # run index
        self._timer = None  # defer run after callback
        # self._cli_changestate = None # client for lifecycle node
        self.cfg_data = None  # config for current action
        super().__init__(node_name)
        # self.declare_parameter(PARAM_NAME, rclpy.Parameter.Type.STRING)
        self.cb_group = ReentrantCallbackGroup()
        self._service = self.create_service(RbsBt, "rbs_interface", self.service_callback, callback_group=self.cb_group)

    # def get_remote_parameter(self, remote_node_name, param_name):
    #     cli = self.create_client(GetParameters, remote_node_name + '/get_parameters')
    #     while not cli.wait_for_service(timeout_sec=1.0):
    #         self.get_logger().info('service not available, waiting again...')
    #     req = GetParameters.Request()
    #     req.names = [param_name]
    #     future = cli.call_async(req)

    #     while rclpy.ok():
    #         rclpy.spin_once(self)
    #         if future.done():
    #             try:
    #                 res = future.result()
    #                 return getattr(res.values[0], self.type_arr[res.values[0].type])
    #             except Exception as e:
    #                 self.get_logger().warn('Service call failed %r' % (e,))
    #             break

    def set_remote_parameter(self, remote_node_name: str, parameter_name: str, new_parameter_value):
        self.cli = self.create_client(SetParameters, remote_node_name + "/set_parameters")
        while not self.cli.wait_for_service(timeout_sec=1.0):
            self.get_logger().info("'" + remote_node_name + "' service not available, waiting again...")
        req = SetParameters.Request()
        req.parameters = [Parameter(parameter_name, value=new_parameter_value).to_parameter_msg()]
        future = self.cli.call_async(req)

        self.executor.spin_until_future_complete(future)
        res = future.result()
        # self.get_logger().info(f"result : {type(res)}/{res}")

        return res.results[0].successful

        # while rclpy.ok():
        #     rclpy.spin_once(self)
        #     if future.done():
        #         try:
        #             res = future.result()
        #             return res.results[0].successful
        #         except Exception as e:
        #             self.get_logger().warn("Service call failed %r" % (e,))
        #         break

    def _deserialize(self, file_path: str):
        with open(file_path, "r") as f:
            if os.path.splitext(file_path)[1] == ".yaml":
                s = yaml.load(f, Loader=yaml.FullLoader)
            else:  # ".json"
                s = json.load(f)
        return s

    def _load_config(self, action: str):  # , command: str):
        p = os.path.join(get_transfer_path_(), action + ".json")
        # load config
        return self._deserialize(p)

    def run_action(self, command_data: dict) -> bool:
        p_list = command_data["param"]
        oper_type = command_data["type"]
        node_name = self.cfg_data["ROS2"]["node_name"]
        if len(p_list) > 0:
            ext = command_data["format"]  # 'yaml' or 'json'
            param_file = os.path.join(get_transfer_path_(), command_data["name"] + "." + ext)
            with open(param_file, "r") as f:
                data = f.read()
            if not self.set_remote_parameter(node_name, PARAM_NAME, data):
                return False
        if not self.set_remote_parameter(node_name, PARAM_SKILL_CONFIG, yaml.dump(self.cfg_data)):
            return False

        ret = False  # default return value
        if oper_type == "run":
            # if not self._cli_changestate:
            #     self.cb_group = ReentrantCallbackGroup()
            self.cli_changestate = self.create_client(ChangeState, f"{node_name}/change_state")  # , callback_group=self.cb_group)
            # self._cli_changestate = self.create_client(GetState, f"{node_name}/get_state")

            while not self.cli_changestate.wait_for_service(timeout_sec=1.0):
                self.get_logger().info(f"'{node_name}' not available... wait")

            req = ChangeState.Request()
            # self._req = GetState.Request()
            req.transition.id = Transition.TRANSITION_CONFIGURE
            # self._i_exe = 1 # call_async(self._req)
            # self._timer = self.create_timer(0.0001, self.timer_callback)
            future = self.cli_changestate.call_async(req)
            self.executor.spin_until_future_complete(future)
            res = future.result()
            if res:  # is not None:
                if res.success:
                    req = ChangeState.Request()
                    req.transition.id = Transition.TRANSITION_ACTIVATE
                    future = self.cli_changestate.call_async(req)
                    self.executor.spin_until_future_complete(future)
                    res = future.result()
                    if res:  # is not None:
                        ret = res.success
            # self.cli_changestate.destroy()
            # else:
            #     return False # state = future.exception()
            # self.get_logger().info(f"state : {type(state)}/{state}")

        elif oper_type == "stop":
            self.cli_changestate = self.create_client(ChangeState, f"{node_name}/change_state")  # , callback_group=self.cb_group)
            while not self.cli_changestate.wait_for_service(timeout_sec=1.0):
                self.get_logger().info(f"'{node_name}' not available... wait")

            req = ChangeState.Request()
            req.transition.id = Transition.TRANSITION_DEACTIVATE
            future = self.cli_changestate.call_async(req)
            self.executor.spin_until_future_complete(future)
            res = future.result()
            if res:  # is not None:
                if res.success:
                    req = ChangeState.Request()
                    req.transition.id = Transition.TRANSITION_CLEANUP
                    future = self.cli_changestate.call_async(req)
                    self.executor.spin_until_future_complete(future)
                    res = future.result()
                    if res:  # is not None:
                        ret = res.success
            # self.cli_changestate.destroy()
        return ret

    def timer_callback(self):
        if self._i_exe == 1:
            self._i_exe = 2
            # self.get_logger().info(f"{self._oper_type} timer_callback 1")
            # self._future = self._cli_changestate.call_async(self._req)
            # self.get_logger().info(f"self._future : {type(self._future)}/{self._future}")

        elif self._i_exe == 2:
            # self.get_logger().info(f"{self._oper_type} timer_callback 2")
            response = True  # self._future.result()
            if response:
                # self.get_logger().info(f"response : {response}")
                self._i_exe = 0
                self._timer.cancel()
            else:
                self._timer.cancel()

    # def changestate_callback(self):
    #     self.get_logger().info(f"changestate_callback is")

    def service_callback(self, request, response):
        self.get_logger().info(f"Incoming request for Action ({request.action}/{request.command})")
        self.cfg_data = self._load_config(request.action)  # , request.command)
        self.get_logger().info(f'Config: Ok ({self.cfg_data["Module"]["description"]})')

        is_action = False
        for comm in self.cfg_data["BTAction"]:
            if comm["name"] == request.command:
                is_action = self.run_action(comm)

        # if not os.path.isfile(request.object.mesh_path):
        #     response.call_status = False
        #     response.error_msg = f"{request.object.mesh_path}: no such file"

        response.ok = is_action  # True
        return response


def main():
    rclpy.init()
    # i_node = rbsInterface("rbs_interface")
    # rclpy.spin(i_node)
    # rclpy.shutdown()

    # executor = rclpy.executors.SingleThreadedExecutor()
    executor = rclpy.executors.MultiThreadedExecutor()
    i_node = rbsInterface("rbs_interface")
    executor.add_node(i_node)
    try:
        executor.spin()
    except (KeyboardInterrupt, rclpy.executors.ExternalShutdownException):
        i_node.destroy_node()


if __name__ == '__main__':
    main()
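Note: run_action() and service_callback() above assume that each <action>.json under rbs_perception/config exposes the keys they read (Module.description, ROS2.node_name, and a BTAction list whose entries carry name/type/param/format). A minimal sketch of such a config, with placeholder values only, might look like:

{
  "Module": { "description": "Pose estimation skill (DOPE)" },
  "ROS2": { "node_name": "pose_estimation" },
  "BTAction": [
    { "name": "peConfigure", "type": "run", "param": ["object_name"], "format": "yaml" },
    { "name": "peStop", "type": "stop", "param": [] }
  ]
}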
@ -69,6 +69,7 @@ def generate_launch_description():
    env_manager = Node(
        package="env_manager",
        executable="run_env_manager",
        parameters=[{'use_sim_time': True}],
        condition=IfCondition(launch_env_manager)
    )
@ -10,6 +10,7 @@
    <plugin name='ignition::gazebo::systems::UserCommands' filename='ignition-gazebo-user-commands-system'/>
    <plugin name='ignition::gazebo::systems::SceneBroadcaster' filename='ignition-gazebo-scene-broadcaster-system'/>
    <plugin name='ignition::gazebo::systems::Contact' filename='ignition-gazebo-contact-system'/>
    <plugin filename="ignition-gazebo-forcetorque-system" name="ignition::gazebo::systems::ForceTorque"/>
    <plugin name="ignition::gazebo::systems::Sensors" filename="ignition-gazebo-sensors-system">
      <render_engine>ogre2</render_engine>
    </plugin>
@ -93,15 +94,15 @@
      </link>
    </model>
    <!-- Manipulating objects -->
    <include>
      <name>board</name>
      <uri>model://board</uri>
      <pose>0.45 0.0 0.0 0.0 0.0 0.0</pose>
    </include>
    <include>
      <name>bishop</name>
      <uri>model://bishop</uri>
      <pose>0.35 0.0 0.0 0.0 0.0 0.0</pose>
    </include>
    <!-- <include> -->
    <!--   <name>board</name> -->
    <!--   <uri>model://board</uri> -->
    <!--   <pose>0.45 0.0 0.0 0.0 0.0 0.0</pose> -->
    <!-- </include> -->
    <!-- <include> -->
    <!--   <name>bishop</name> -->
    <!--   <uri>model://bishop</uri> -->
    <!--   <pose>0.35 0.0 0.0 0.0 0.0 0.0</pose> -->
    <!-- </include> -->
  </world>
</sdf>
@ -31,6 +31,7 @@ rosidl_generate_interfaces(${PROJECT_NAME}
  "srv/GetPickPlacePoses.srv"
  "srv/AddPlanningSceneObject.srv"
  "srv/PlanningSceneObjectState.srv"
  "srv/RbsBt.srv"
  DEPENDENCIES std_msgs geometry_msgs moveit_msgs shape_msgs
)
4 rbs_skill_interfaces/srv/RbsBt.srv (Normal file)
@ -0,0 +1,4 @@
string action
string command
---
bool ok
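For reference, a minimal rclpy client for the new RbsBt service could look like the sketch below; it assumes the rbs_interface node defined above is running, and the action/command strings are placeholders rather than values defined by this change.

#!/usr/bin/env python3
# Sketch of an RbsBt client; "PoseEstimation"/"peConfigure" are placeholder names.
import rclpy
from rclpy.node import Node
from rbs_skill_interfaces.srv import RbsBt


def call_rbs_interface(action: str, command: str) -> bool:
    rclpy.init()
    node = Node("rbs_bt_client")
    client = node.create_client(RbsBt, "rbs_interface")
    client.wait_for_service()
    future = client.call_async(RbsBt.Request(action=action, command=command))
    rclpy.spin_until_future_complete(node, future)
    ok = future.result().ok
    node.destroy_node()
    rclpy.shutdown()
    return ok


if __name__ == "__main__":
    print(call_rbs_interface("PoseEstimation", "peConfigure"))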
@ -152,6 +152,7 @@ ament_export_include_directories(include)
ament_python_install_package(${PROJECT_NAME})
install(PROGRAMS
  scripts/test_cartesian_controller.py
  scripts/move_to_pose.py
  DESTINATION lib/${PROJECT_NAME}
)
@ -38,6 +38,12 @@ def launch_setup(context, *args, **kwargs):
        ]
    )

    move_to_pose = Node(
        package="rbs_skill_servers",
        executable="move_to_pose.py",
        namespace=namespace
    )

    gripper_control_node = Node(
        package="rbs_skill_servers",
        executable="gripper_control_action_server",
@ -89,6 +95,7 @@
        gripper_control_node,
        move_cartesian_path_action_server,
        move_joint_state_action_server,
        move_to_pose,
        # grasp_pose_loader
    ]
    return nodes_to_start
108 rbs_skill_servers/scripts/move_to_pose.py (Executable file)
@ -0,0 +1,108 @@
#!/usr/bin/python
import rclpy
from rclpy.action import ActionServer
from rclpy.node import Node
import numpy as np
from rclpy.callback_groups import ReentrantCallbackGroup
from rclpy.executors import MultiThreadedExecutor

from geometry_msgs.msg import Pose, PoseStamped
from rbs_skill_interfaces.action import MoveitSendPose


class PoseSubscriber(Node):
    def __init__(self, parent=None):
        super().__init__('pose_subscriber')
        self.parent = parent
        self._sub = self.create_subscription(PoseStamped,
                                             "/cartesian_motion_controller/current_pose",
                                             self.parent.on_pose_callback, 1,
                                             callback_group=self.parent._callback_group)
        self.get_logger().info('PoseSubscriber node initialized')


class CartesianMoveToPose(Node):

    def __init__(self):
        super().__init__('cartesian_move_to_pose')
        self._callback_group = ReentrantCallbackGroup()
        self._action_server = ActionServer(
            self,
            MoveitSendPose,
            'cartesian_move_to_pose',
            self.execute_callback, callback_group=self._callback_group)
        self._pub = self.create_publisher(PoseStamped,
                                          "/cartesian_motion_controller/target_frame", 1,
                                          callback_group=self._callback_group)
        self.current_pose = None
        self.goal_tolerance = 0.05

    def on_pose_callback(self, msg: PoseStamped):
        if isinstance(msg, PoseStamped):
            self.current_pose = msg

    def execute_callback(self, goal_handle):
        self.get_logger().debug(f"Executing goal {goal_handle.request.target_pose}")
        tp = PoseStamped()
        tp.pose = goal_handle.request.target_pose
        tp.header.stamp = self.get_clock().now().to_msg()
        tp.header.frame_id = "base_link"

        # Keep publishing the target until the controller is within the goal tolerance;
        # the distance is None until the first current_pose message has arrived.
        while True:
            distance = self.get_distance_to_target(tp.pose)
            if distance is not None and distance < self.goal_tolerance:
                break
            self._pub.publish(tp)

        goal_handle.succeed()

        result = MoveitSendPose.Result()
        result.success = True
        return result

    def get_distance_to_target(self, target_pose: Pose):
        if self.current_pose is None or self.current_pose.pose is None:
            self.get_logger().warn("Current pose is not available")
            return None

        current_pose = self.current_pose.pose

        current_position = np.array([
            current_pose.position.x,
            current_pose.position.y,
            current_pose.position.z,
            current_pose.orientation.x,
            current_pose.orientation.y,
            current_pose.orientation.z
        ])

        target_position = np.array([
            target_pose.position.x,
            target_pose.position.y,
            target_pose.position.z,
            target_pose.orientation.x,
            target_pose.orientation.y,
            target_pose.orientation.z
        ])

        # Check that both coordinate arrays contain valid values
        if np.any(np.isnan(current_position)) or np.any(np.isnan(target_position)):
            self.get_logger().error("Invalid coordinates")
            return None

        # Compute the distance between the current and target poses
        distance = np.linalg.norm(current_position - target_position)

        return distance


def main(args=None):
    rclpy.init(args=args)

    cartesian_move_to_pose = CartesianMoveToPose()
    pose_subscriber = PoseSubscriber(parent=cartesian_move_to_pose)

    executor = MultiThreadedExecutor()
    executor.add_node(cartesian_move_to_pose)
    executor.add_node(pose_subscriber)

    executor.spin()

    rclpy.shutdown()


if __name__ == '__main__':
    main()
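A matching action client for cartesian_move_to_pose might look like the sketch below; it assumes MoveitSendPose.Goal carries the target_pose field used in execute_callback(), and the pose values are placeholders.

#!/usr/bin/python
# Sketch of a client for the cartesian_move_to_pose action server above.
import rclpy
from rclpy.action import ActionClient
from rclpy.node import Node
from geometry_msgs.msg import Pose
from rbs_skill_interfaces.action import MoveitSendPose


def main():
    rclpy.init()
    node = Node("cartesian_move_to_pose_client")
    client = ActionClient(node, MoveitSendPose, "cartesian_move_to_pose")
    client.wait_for_server()

    goal = MoveitSendPose.Goal()
    goal.target_pose = Pose()
    goal.target_pose.position.x = 0.4  # placeholder target expressed in base_link
    goal.target_pose.position.z = 0.3
    goal.target_pose.orientation.w = 1.0

    send_future = client.send_goal_async(goal)
    rclpy.spin_until_future_complete(node, send_future)
    result_future = send_future.result().get_result_async()
    rclpy.spin_until_future_complete(node, result_future)
    node.get_logger().info(f"success: {result_future.result().result.success}")
    rclpy.shutdown()


if __name__ == "__main__":
    main()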
1 rbs_skill_servers/scripts/test_cartesian_controller.py (Normal file → Executable file)
@ -1,3 +1,4 @@
#!/usr/bin/python
import rclpy
from rclpy.node import Node
import argparse
@ -12,13 +12,17 @@
#include "tf2_ros/static_transform_broadcaster.h"
#include <Eigen/Core>
#include <fstream>
#include <geometry_msgs/msg/detail/pose__struct.hpp>
#include <geometry_msgs/msg/detail/pose_array__struct.hpp>
#include <rbs_utils_interfaces/msg/detail/assembly_config__struct.hpp>
#include <rclcpp/clock.hpp>
#include <rclcpp/logger.hpp>
#include <rclcpp/node.hpp>
#include <rclcpp/node_interfaces/node_clock_interface.hpp>
#include <rclcpp/time.hpp>
#include <tf2/convert.h>
#include <tf2_eigen/tf2_eigen.hpp>
#include <tf2_msgs/msg/detail/tf_message__struct.hpp>
#include <unordered_map>

const std::string env_dir = std::getenv("RBS_ASSEMBLY_DIR");
@ -49,15 +53,18 @@ public:
      const rclcpp::node_interfaces::NodeClockInterface::SharedPtr
          &t_clock_node_interface);

  tf2_msgs::msg::TFMessage getAllPossibleTfData();
  tf2_msgs::msg::TFMessage getTfData(const std::string &model_name);
  tf2_msgs::msg::TFMessage getGraspTfData(const std::string &model_name);

  std::vector<std::string> getSceneModelNames();
  std::vector<std::string> getUniqueSceneModelNames();

  void printWorkspace();
  geometry_msgs::msg::PoseArray getWorkspaceInspectorTrajectory();
  geometry_msgs::msg::Pose
  transformTrajectory(const geometry_msgs::msg::TransformStamped &pose);
  transformTrajectory(const geometry_msgs::msg::Pose &pose);
  void saveRbsConfig();
  tf2_msgs::msg::TFMessage getAdditionalPoses();

private:
  std::vector<std::string> m_env_files;
@ -69,7 +76,7 @@ private:
  rclcpp::Clock::SharedPtr m_clock;
  void parseRbsDb(const std::string &filepath);

  tf2_msgs::msg::TFMessage getWorkspace();
  geometry_msgs::msg::PoseArray getWorkspace();
  geometry_msgs::msg::Transform
  createTransform(const geometry_msgs::msg::Pose &pose);
};
@ -1,12 +1,15 @@
#include "rbs_utils/rbs_utils.hpp"
#include <Eigen/src/Geometry/Transform.h>
#include <dynmsg/typesupport.hpp>
#include <fstream>
#include <geometry_msgs/msg/detail/pose__struct.hpp>
#include <geometry_msgs/msg/detail/transform_stamped__struct.hpp>
#include <rbs_utils_interfaces/msg/detail/assembly_config__struct.hpp>
#include <rbs_utils_interfaces/msg/detail/assembly_config__traits.hpp>
#include <rclcpp/logging.hpp>
#include <rclcpp/node_interfaces/node_clock_interface.hpp>
#include <string>
#include <strstream>
#include <tf2/LinearMath/Transform.h>
#include <tf2/convert.h>
#include <tf2_eigen/tf2_eigen.hpp>
@ -27,7 +30,7 @@ AssemblyConfigLoader::AssemblyConfigLoader(
      m_logger(t_logging_interface->get_logger()),
      m_clock(t_clock_interface->get_clock()) {
  if (!m_assembly_dir.empty()) {
    std::vector<std::string> filenames = {"robossembler_db"};
    std::vector<std::string> filenames = {"rbs_db"};
    for (auto &filename : filenames) {
      std::string filepath =
          env_dir + "/" + m_assembly_dir + "/" + filename + ".yaml";
@ -49,8 +52,9 @@ void AssemblyConfigLoader::parseRbsDb(const std::string &filepath) {
    InterfaceTypeName interface{"rbs_utils_interfaces", "AssemblyConfig"};
    rosmsg.type_info = dynmsg::cpp::get_type_info(interface);

    void * ros_msg = reinterpret_cast<void *>(&m_assembly_config);
    dynmsg::cpp::yaml_and_typeinfo_to_rosmsg(rosmsg.type_info, asm_config_string, ros_msg);
    void *ros_msg = reinterpret_cast<void *>(&m_assembly_config);
    dynmsg::cpp::yaml_and_typeinfo_to_rosmsg(rosmsg.type_info,
                                             asm_config_string, ros_msg);

  } catch (const std::exception &e) {
    RCLCPP_ERROR(m_logger, "Exception reading file %s: %s", filepath.c_str(),
@ -58,11 +62,9 @@ void AssemblyConfigLoader::parseRbsDb(const std::string &filepath) {
  }
}

[[deprecated("Not Implemented")]] void AssemblyConfigLoader::saveRbsConfig() {
[[deprecated("Not Implemented")]] void AssemblyConfigLoader::saveRbsConfig() {}

}

std::vector<std::string> AssemblyConfigLoader::getSceneModelNames() {
std::vector<std::string> AssemblyConfigLoader::getUniqueSceneModelNames() {
  std::vector<std::string> model_names;
  if (m_assembly_config.relative_part.size() != 0) {
    for (auto &t : m_assembly_config.relative_part) {
@ -79,36 +81,137 @@ std::vector<std::string> AssemblyConfigLoader::getSceneModelNames() {
  return model_names;
}

tf2_msgs::msg::TFMessage AssemblyConfigLoader::getAllPossibleTfData() {
  tf2_msgs::msg::TFMessage tp;
  // Get absolute parts
  for (auto &abs_poses : m_assembly_config.absolute_part) {
    geometry_msgs::msg::TransformStamped tmp;
    tmp.transform = createTransform(abs_poses.pose);
    tmp.child_frame_id = abs_poses.name;
    tmp.header.frame_id = "world";
    tmp.header.stamp = m_clock->now();
    tp.transforms.push_back(tmp);
  }
  // Get relative parts
  for (const auto &relative_part : m_assembly_config.relative_part) {
    geometry_msgs::msg::TransformStamped tmp;
    tmp.transform = createTransform(relative_part.pose);
    tmp.child_frame_id = relative_part.name;
    tmp.header.frame_id = relative_part.relative_at;
    tmp.header.stamp = m_clock->now();
    tp.transforms.push_back(tmp);
    RCLCPP_INFO(m_logger, "Model name [%s]", relative_part.name.c_str());
  }
  // Get grasp poses
  for (const auto &grasp_pose : m_assembly_config.grasp_pose) {
    geometry_msgs::msg::TransformStamped tmp;
    tmp.transform = createTransform(grasp_pose.pose);
    tmp.child_frame_id = grasp_pose.name;
    tmp.header.frame_id = grasp_pose.relative_at;
    tmp.header.stamp = m_clock->now();
    tp.transforms.push_back(tmp);
  }

  return tp;
}

tf2_msgs::msg::TFMessage
AssemblyConfigLoader::getGraspTfData(const std::string &model_name) {
  tf2_msgs::msg::TFMessage tp;

  bool found_grasp_pose = false;
  if (!m_assembly_config.relative_part.empty()) {
    for (auto &abs_poses : m_assembly_config.absolute_part) {
      geometry_msgs::msg::TransformStamped tmp;
      tmp.transform = createTransform(abs_poses.pose);
      tmp.child_frame_id = abs_poses.name;
      tmp.header.frame_id = "world";
      tmp.header.stamp = m_clock->now();
      tp.transforms.push_back(tmp);
    }
  } else {
    RCLCPP_ERROR(m_logger, "Relative parts is empty size: %zu",
                 m_assembly_config.relative_part.size());
  }

  for (const auto &grasp_pose : m_assembly_config.grasp_pose) {
    if (grasp_pose.relative_at == model_name) {
      geometry_msgs::msg::TransformStamped tmp;
      tmp.transform = createTransform(grasp_pose.pose);
      tmp.child_frame_id = grasp_pose.name;
      tmp.header.frame_id = grasp_pose.relative_at;
      tmp.header.stamp = m_clock->now();
      tp.transforms.push_back(tmp);
      found_grasp_pose = true;
    }
  }

  if (!found_grasp_pose) {
    RCLCPP_ERROR(m_logger, "Grasp pose not found for model %s",
                 model_name.c_str());
  }

  return tp;
}

tf2_msgs::msg::TFMessage
AssemblyConfigLoader::getTfData(const std::string &model_name) {
  tf2_msgs::msg::TFMessage tp;

  if (!m_assembly_config.absolute_part.empty()) {
    for (auto &abs_part : m_assembly_config.absolute_part) {
      geometry_msgs::msg::TransformStamped abs_transform_stamped;
      abs_transform_stamped.transform = createTransform(abs_part.pose);
      abs_transform_stamped.child_frame_id = abs_part.name;
      abs_transform_stamped.header.frame_id = "world";
      abs_transform_stamped.header.stamp = m_clock->now();
      tp.transforms.push_back(abs_transform_stamped);
    }
  } else {
    RCLCPP_ERROR(m_logger, "Absolute parts is empty size: %zu",
                 m_assembly_config.absolute_part.size());
  }

  bool found_model = false;
  bool found_grasp_pose = false;
  if (!m_assembly_config.relative_part.empty()) {
    const auto &absolute_part_pose = m_assembly_config.absolute_part.pose;
    tp.transforms.emplace_back().transform = createTransform(absolute_part_pose);
    tp.transforms.emplace_back().child_frame_id = "world";
    tp.transforms.emplace_back().header.stamp = m_clock->now();
    for (auto &abs_poses : m_assembly_config.absolute_part) {
      geometry_msgs::msg::TransformStamped tmp;
      tmp.transform = createTransform(abs_poses.pose);
      tmp.child_frame_id = abs_poses.name;
      tmp.header.frame_id = "world";
      tmp.header.stamp = m_clock->now();
      tp.transforms.push_back(tmp);
    }

    for (const auto &relative_part : m_assembly_config.relative_part) {
      // Find our model data
      if (relative_part.name == model_name) {
        tp.transforms.emplace_back().transform = createTransform(relative_part.pose);
        tp.transforms.emplace_back().child_frame_id = relative_part.relative_at;
        tp.transforms.emplace_back().header.stamp = m_clock->now();
        geometry_msgs::msg::TransformStamped tmp;
        tmp.transform = createTransform(relative_part.pose);
        tmp.child_frame_id = relative_part.name;
        tmp.header.frame_id = relative_part.relative_at;
        tmp.header.stamp = m_clock->now();
        tp.transforms.push_back(tmp);
        found_model = true;
      }
      RCLCPP_INFO(m_logger, "Model name [%s]", relative_part.name.c_str());
      RCLCPP_INFO(m_logger, "Model name [%s]",
                  relative_part.relative_at.c_str());
    }
  } else {
    RCLCPP_ERROR(m_logger, "Relative parts is empty size: %zu", m_assembly_config.relative_part.size());
    RCLCPP_ERROR(m_logger, "Relative parts is empty size: %zu",
                 m_assembly_config.relative_part.size());
  }

  for (const auto &grasp_pose : m_assembly_config.grasp_pose) {
    if (grasp_pose.name == model_name) {
      tp.transforms.emplace_back().transform = createTransform(grasp_pose.pose);
      tp.transforms.emplace_back().child_frame_id = grasp_pose.name;
      tp.transforms.emplace_back().header.stamp = m_clock->now();
    if (grasp_pose.relative_at == model_name) {
      geometry_msgs::msg::TransformStamped tmp;
      tmp.transform = createTransform(grasp_pose.pose);
      tmp.child_frame_id = grasp_pose.name;
      tmp.header.frame_id = grasp_pose.relative_at;
      tmp.header.stamp = m_clock->now();
      tp.transforms.push_back(tmp);
      found_grasp_pose = true;
    }
  }
@ -117,7 +220,8 @@ AssemblyConfigLoader::getTfData(const std::string &model_name) {
    RCLCPP_ERROR(m_logger, "Model %s not found in config", model_name.c_str());
  }
  if (!found_grasp_pose) {
    RCLCPP_ERROR(m_logger, "Grasp pose not found for model %s", model_name.c_str());
    RCLCPP_ERROR(m_logger, "Grasp pose not found for model %s",
                 model_name.c_str());
  }

  return tp;
@ -136,56 +240,58 @@ AssemblyConfigLoader::createTransform(const geometry_msgs::msg::Pose &pose) {
  return transform;
}

tf2_msgs::msg::TFMessage AssemblyConfigLoader::getWorkspace() {
  tf2_msgs::msg::TFMessage tf_msg;
geometry_msgs::msg::PoseArray AssemblyConfigLoader::getWorkspace() {
  geometry_msgs::msg::PoseArray pose_array;
  pose_array.header.frame_id = "world";

  if (m_assembly_config.workspace.empty()) {
    RCLCPP_WARN(m_logger, "Workspace is empty, check your robossembler_db");
    return tf_msg;
    return pose_array;
  }

  const std::string child_frame_id = "world";
  pose_array.poses.reserve(m_assembly_config.workspace.size());

  const double default_rotation_value = 0.0;
  const double default_rotation_w = 1.0;

  tf_msg.transforms.reserve(m_assembly_config.workspace.size());
  for (std::size_t i = 0; i < m_assembly_config.workspace.size(); ++i) {
    tf_msg.transforms[i].transform.translation.x =
        m_assembly_config.workspace[i].x;
    tf_msg.transforms[i].transform.translation.y =
        m_assembly_config.workspace[i].y;
    tf_msg.transforms[i].transform.translation.z =
        m_assembly_config.workspace[i].z;
    tf_msg.transforms[i].child_frame_id = child_frame_id;
    tf_msg.transforms[i].transform.rotation.x = default_rotation_value;
    tf_msg.transforms[i].transform.rotation.y = default_rotation_value;
    tf_msg.transforms[i].transform.rotation.z = default_rotation_value;
    tf_msg.transforms[i].transform.rotation.w = default_rotation_w;
    tf_msg.transforms[i].header.stamp = m_clock->now();
  for (const auto &point : m_assembly_config.workspace) {
    geometry_msgs::msg::Pose pose;
    pose.position.x = point.x;
    pose.position.y = point.y;
    pose.position.z = point.z;
    pose.orientation.x = default_rotation_value;
    pose.orientation.y = default_rotation_value;
    pose.orientation.z = default_rotation_value;
    pose.orientation.w = default_rotation_w;
    pose_array.poses.push_back(pose);
  }
  return tf_msg;
}

  return pose_array;
}

geometry_msgs::msg::PoseArray
AssemblyConfigLoader::getWorkspaceInspectorTrajectory() {
  geometry_msgs::msg::PoseArray pose_array;
  pose_array.header.frame_id = "world";
  auto workspace = getWorkspace();
  for (auto &point : workspace.transforms) {
    auto pose = transformTrajectory(point);
    pose_array.poses.push_back(pose);

  auto workspace_poses = getWorkspace();

  for (const auto &pose : workspace_poses.poses) {
    pose_array.poses.push_back(transformTrajectory(pose));
  }

  return pose_array;
}

geometry_msgs::msg::Pose AssemblyConfigLoader::transformTrajectory(
    const geometry_msgs::msg::TransformStamped &pose) {
  auto pose_eigen = tf2::transformToEigen(pose.transform);
    const geometry_msgs::msg::Pose &pose) {
  Eigen::Isometry3d pose_eigen;
  tf2::fromMsg(pose, pose_eigen);

  Eigen::AngleAxisd rotation(M_PI, Eigen::Vector3d::UnitY());
  pose_eigen.rotate(rotation);
  pose_eigen.translation().z() += 0.35;
  auto pose_msg = tf2::toMsg(pose_eigen);
  return pose_msg;
  return tf2::toMsg(pose_eigen);
}

} // namespace rbs_utils
@ -1,6 +1,7 @@

string assets_db
geometry_msgs/Point[] workspace
rbs_utils_interfaces/NamedPose absolute_part
rbs_utils_interfaces/NamedPose[] absolute_part
rbs_utils_interfaces/RelativeNamedPose[] relative_part
rbs_utils_interfaces/NamedPose[] grasp_pose
rbs_utils_interfaces/RelativeNamedPose[] grasp_pose
rbs_utils_interfaces/NamedPose[] extra_poses
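The updated AssemblyConfig layout implies an rbs_db.yaml of roughly the following shape (it is parsed by dynmsg in AssemblyConfigLoader::parseRbsDb). This is only a hypothetical fragment: the part names, pose values, and exact field nesting are placeholders, not data from this change.

# Hypothetical fragment of <RBS_ASSEMBLY_DIR>/<assembly>/rbs_db.yaml
assets_db: assets.json
workspace:
  - {x: 0.3, y: -0.2, z: 0.0}
  - {x: 0.3, y: 0.2, z: 0.0}
absolute_part:
  - name: board
    pose:
      position: {x: 0.45, y: 0.0, z: 0.0}
      orientation: {x: 0.0, y: 0.0, z: 0.0, w: 1.0}
relative_part:
  - name: bishop
    relative_at: board
    pose:
      position: {x: 0.0, y: 0.0, z: 0.05}
      orientation: {x: 0.0, y: 0.0, z: 0.0, w: 1.0}
grasp_pose:
  - name: bishop_grasp
    relative_at: bishop
    pose:
      position: {x: 0.0, y: 0.0, z: 0.1}
      orientation: {x: 0.0, y: 0.0, z: 0.0, w: 1.0}
extra_poses: []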
35 rbss_objectdetection/rbs_package.json (Normal file)
@ -0,0 +1,35 @@
{
  "SkillPackage": {
    "name": "Robossembler", "version": "1.0", "format": "1"
  },
  "Module": {
    "name": "ObjectDetection", "description": "Object detection skill with YOLOv8"
  },
  "Launch": {
    "executable": "detection_lifecycle.py"
  },
  "Interface": {
    "Input": [
      {
        "name": "cameraLink", "type": "string", "group": "STD_USER"
      },
      {
        "name": "topicImage", "type": "Image", "group": "sensor_msgs.msg"
      }
    ],
    "Output": [
      {
        "name": "boundBox", "type": "BoundBox", "group": ".msg"
      }
    ]
  },
  "Settings": [
    {
      "name": "publishDelay", "value": 0.5
    },
    {
      "name": "xxxxxxxx", "value": "a0a0a0"
    }
  ]
}