Resolve "Добавить параметры в конфиг pose estimation узла"

This commit is contained in:
Igor Brylyov 2023-11-13 13:08:28 +00:00
parent 4371dbdcee
commit 077872e489
22 changed files with 221906 additions and 108 deletions

View file

@@ -21,7 +21,7 @@ public:
IgnTf2Broadcaster(const rclcpp::NodeOptions & options)
: env_manager::component_manager::NodeComponent(options)
{
_follow_frames = {"box1", "box2", "box3", "box4", "box5", "box6"};
_follow_frames = {"box1", "box2", "box3", "box4", "box5", "box6", "fork_gt"};
std::string topic_name = "/world/" + DEFAULT_IGN_WORLD_NAME + "/dynamic_pose/info";
// init broadcaster
_tf2_broadcaster = std::make_unique<tf2_ros::TransformBroadcaster>(*this);

View file

@@ -2,13 +2,13 @@
GROUND_TRUE = {
namespace = "ground_true",
components = {
camera_node = {
lib = "libign_camera_component.so",
class = "camera_component::IgnCameraComponent"
},
-- camera_node = {
-- lib = "libign_camera_component.so",
-- class = "camera_component::IgnCameraComponent"
-- },
scene_component = {
lib = "libign_detect_object_component.so",
class = "scene_component::IgnDetectObjectService"
lib = "libign_tf2_broadcaster_component.so",
class = "scene_component::IgnTf2Broadcaster"
},
}
}

View file

@@ -6,7 +6,7 @@ NODES = {
},
scene_component = {
name = "scene_component",
type = "Service",
msg_type = "component_interfaces/DetectObject"
type = "Publisher",
msg_type = "tf2_msgs/tf_message"
},
}

View file

@@ -14,8 +14,4 @@ repositories:
ros2_robotiq_gripper:
type: git
url: https://github.com/solid-sinusoid/ros2_robotiq_gripper.git
version: dev
cartesian_controllers:
type: git
url: https://github.com/fzi-forschungszentrum-informatik/cartesian_controllers.git
version: ros2
version: dev

View file

@@ -12,7 +12,7 @@ find_package(ament_cmake REQUIRED)
# find_package(<dependency> REQUIRED)
install(
DIRECTORY launch
DIRECTORY launch config
DESTINATION share/${PROJECT_NAME}
)

View file

@@ -0,0 +1,622 @@
Panels:
- Class: rviz_common/Displays
Help Height: 87
Name: Displays
Property Tree Widget:
Expanded:
- /Global Options1
- /TF1
- /TF1/Frames1
Splitter Ratio: 0.49496981501579285
Tree Height: 778
- Class: rviz_common/Selection
Name: Selection
- Class: rviz_common/Tool Properties
Expanded:
- /2D Goal Pose1
- /Publish Point1
Name: Tool Properties
Splitter Ratio: 0.5886790156364441
- Class: rviz_common/Views
Expanded:
- /Current View1
Name: Views
Splitter Ratio: 0.5
Visualization Manager:
Class: ""
Displays:
- Alpha: 0.5
Cell Size: 1
Class: rviz_default_plugins/Grid
Color: 160; 160; 164
Enabled: true
Line Style:
Line Width: 0.029999999329447746
Value: Lines
Name: Grid
Normal Cell Count: 0
Offset:
X: 0
Y: 0
Z: 0
Plane: XY
Plane Cell Count: 10
Reference Frame: <Fixed Frame>
Value: true
- Class: rviz_default_plugins/MarkerArray
Enabled: true
Name: MarkerArray
Namespaces:
{}
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /display_contacts
Value: true
- Class: moveit_rviz_plugin/PlanningScene
Enabled: true
Move Group Namespace: ""
Name: PlanningScene
Planning Scene Topic: /monitored_planning_scene
Robot Description: robot_description
Scene Geometry:
Scene Alpha: 0.8999999761581421
Scene Color: 50; 230; 50
Scene Display Time: 0.009999999776482582
Show Scene Geometry: true
Voxel Coloring: Z-Axis
Voxel Rendering: Occupied Voxels
Scene Robot:
Attached Body Color: 150; 50; 150
Links:
All Links Enabled: true
Expand Joint Details: false
Expand Link Details: false
Expand Tree: false
Link Tree Style: Links in Alphabetic Order
base:
Alpha: 1
Show Axes: false
Show Trail: false
base_link:
Alpha: 1
Show Axes: false
Show Trail: false
base_link_inertia:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
box1_place:
Alpha: 1
Show Axes: false
Show Trail: false
box2_place:
Alpha: 1
Show Axes: false
Show Trail: false
box3_place:
Alpha: 1
Show Axes: false
Show Trail: false
box4_place:
Alpha: 1
Show Axes: false
Show Trail: false
box5_place:
Alpha: 1
Show Axes: false
Show Trail: false
box6_place:
Alpha: 1
Show Axes: false
Show Trail: false
flange:
Alpha: 1
Show Axes: false
Show Trail: false
forearm_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
ft_frame:
Alpha: 1
Show Axes: false
Show Trail: false
inner_rgbd_camera:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
outer_rgbd_camera:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_base_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_left_finger_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_left_finger_tip_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_left_inner_knuckle_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_left_knuckle_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_right_finger_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_right_finger_tip_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_right_inner_knuckle_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_right_knuckle_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_grasp_point:
Alpha: 1
Show Axes: false
Show Trail: false
shoulder_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
tool0:
Alpha: 1
Show Axes: false
Show Trail: false
upper_arm_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
world:
Alpha: 1
Show Axes: false
Show Trail: false
wrist_1_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
wrist_2_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
wrist_3_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
Robot Alpha: 1
Show Robot Collision: false
Show Robot Visual: true
Value: true
- Class: rviz_default_plugins/TF
Enabled: true
Frame Timeout: 15
Frames:
All Enabled: false
base:
Value: false
base_link:
Value: false
base_link_inertia:
Value: false
box1:
Value: false
box1_place:
Value: false
box2:
Value: false
box2_place:
Value: false
box3:
Value: false
box3_place:
Value: false
box4:
Value: false
box4_place:
Value: false
box5:
Value: false
box5_place:
Value: false
box6:
Value: false
box6_place:
Value: false
flange:
Value: false
forearm_link:
Value: false
fork_gt:
Value: true
ft_frame:
Value: false
inner_rgbd_camera:
Value: false
outer_rgbd_camera:
Value: false
robotiq_85_base_link:
Value: false
robotiq_85_left_finger_link:
Value: false
robotiq_85_left_finger_tip_link:
Value: false
robotiq_85_left_inner_knuckle_link:
Value: false
robotiq_85_left_knuckle_link:
Value: false
robotiq_85_right_finger_link:
Value: false
robotiq_85_right_finger_tip_link:
Value: false
robotiq_85_right_inner_knuckle_link:
Value: false
robotiq_85_right_knuckle_link:
Value: false
robotiq_grasp_point:
Value: false
shoulder_link:
Value: false
tool0:
Value: false
upper_arm_link:
Value: false
world:
Value: false
wrist_1_link:
Value: false
wrist_2_link:
Value: false
wrist_3_link:
Value: false
Marker Scale: 0.4000000059604645
Name: TF
Show Arrows: true
Show Axes: true
Show Names: false
Tree:
world:
base_link:
base:
{}
base_link_inertia:
shoulder_link:
upper_arm_link:
forearm_link:
wrist_1_link:
wrist_2_link:
wrist_3_link:
flange:
tool0:
inner_rgbd_camera:
{}
robotiq_85_base_link:
robotiq_85_left_inner_knuckle_link:
{}
robotiq_85_left_knuckle_link:
robotiq_85_left_finger_link:
robotiq_85_left_finger_tip_link:
{}
robotiq_85_right_inner_knuckle_link:
{}
robotiq_85_right_knuckle_link:
robotiq_85_right_finger_link:
robotiq_85_right_finger_tip_link:
{}
robotiq_grasp_point:
{}
ft_frame:
{}
box1:
{}
box1_place:
{}
box2:
{}
box2_place:
{}
box3:
{}
box3_place:
{}
box4:
{}
box4_place:
{}
box5:
{}
box5_place:
{}
box6:
{}
box6_place:
{}
fork_gt:
{}
outer_rgbd_camera:
{}
Update Interval: 0
Value: true
- Class: moveit_rviz_plugin/Trajectory
Color Enabled: false
Enabled: true
Interrupt Display: false
Links:
All Links Enabled: true
Expand Joint Details: false
Expand Link Details: false
Expand Tree: false
Link Tree Style: Links in Alphabetic Order
base:
Alpha: 1
Show Axes: false
Show Trail: false
base_link:
Alpha: 1
Show Axes: false
Show Trail: false
base_link_inertia:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
box1_place:
Alpha: 1
Show Axes: false
Show Trail: false
box2_place:
Alpha: 1
Show Axes: false
Show Trail: false
box3_place:
Alpha: 1
Show Axes: false
Show Trail: false
box4_place:
Alpha: 1
Show Axes: false
Show Trail: false
box5_place:
Alpha: 1
Show Axes: false
Show Trail: false
box6_place:
Alpha: 1
Show Axes: false
Show Trail: false
flange:
Alpha: 1
Show Axes: false
Show Trail: false
forearm_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
ft_frame:
Alpha: 1
Show Axes: false
Show Trail: false
inner_rgbd_camera:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
outer_rgbd_camera:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_base_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_left_finger_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_left_finger_tip_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_left_inner_knuckle_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_left_knuckle_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_right_finger_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_right_finger_tip_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_right_inner_knuckle_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_85_right_knuckle_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
robotiq_grasp_point:
Alpha: 1
Show Axes: false
Show Trail: false
shoulder_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
tool0:
Alpha: 1
Show Axes: false
Show Trail: false
upper_arm_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
world:
Alpha: 1
Show Axes: false
Show Trail: false
wrist_1_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
wrist_2_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
wrist_3_link:
Alpha: 1
Show Axes: false
Show Trail: false
Value: true
Loop Animation: false
Name: Trajectory
Robot Alpha: 0.5
Robot Color: 150; 50; 150
Robot Description: robot_description
Show Robot Collision: false
Show Robot Visual: true
Show Trail: false
State Display Time: 3x
Trail Step Size: 1
Trajectory Topic: /display_planned_path
Use Sim Time: false
Value: true
Enabled: true
Global Options:
Background Color: 48; 48; 48
Fixed Frame: world
Frame Rate: 30
Name: root
Tools:
- Class: rviz_default_plugins/Interact
Hide Inactive Objects: true
- Class: rviz_default_plugins/MoveCamera
- Class: rviz_default_plugins/Select
- Class: rviz_default_plugins/FocusCamera
- Class: rviz_default_plugins/Measure
Line color: 128; 128; 0
- Class: rviz_default_plugins/SetInitialPose
Covariance x: 0.25
Covariance y: 0.25
Covariance yaw: 0.06853891909122467
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /initialpose
- Class: rviz_default_plugins/SetGoal
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /goal_pose
- Class: rviz_default_plugins/PublishPoint
Single click: true
Topic:
Depth: 5
Durability Policy: Volatile
History Policy: Keep Last
Reliability Policy: Reliable
Value: /clicked_point
Transformation:
Current:
Class: rviz_default_plugins/TF
Value: true
Views:
Current:
Class: rviz_default_plugins/Orbit
Distance: 6.619869709014893
Enable Stereo Rendering:
Stereo Eye Separation: 0.05999999865889549
Stereo Focal Distance: 1
Swap Stereo Eyes: false
Value: false
Focal Point:
X: 0.2489434778690338
Y: -0.013962505385279655
Z: 0.13800443708896637
Focal Shape Fixed Size: true
Focal Shape Size: 0.05000000074505806
Invert Z Axis: false
Name: Current View
Near Clip Distance: 0.009999999776482582
Pitch: 0.4353981614112854
Target Frame: <Fixed Frame>
Value: Orbit (rviz)
Yaw: 2.2503976821899414
Saved: ~
Window Geometry:
Displays:
collapsed: false
Height: 1016
Hide Left Dock: false
Hide Right Dock: false
QMainWindow State: 000000ff00000000fd0000000400000000000001f30000039efc020000000cfb0000001200530065006c0065006300740069006f006e00000001e10000009b0000005c00fffffffb0000001e0054006f006f006c002000500072006f007000650072007400690065007302000001ed000001df00000185000000a3fb000000120056006900650077007300200054006f006f02000001df000002110000018500000122fb000000200054006f006f006c002000500072006f0070006500720074006900650073003203000002880000011d000002210000017afb000000100044006900730070006c006100790073010000003d0000039e000000c900fffffffb0000002000730065006c0065006300740069006f006e00200062007500660066006500720200000138000000aa0000023a00000294fb00000014005700690064006500530074006500720065006f02000000e6000000d2000003ee0000030bfb0000000c004b0069006e0065006300740200000186000001060000030c00000261fb00000044004d006f00740069006f006e0050006c0061006e006e0069006e00670020002d0020005400720061006a006500630074006f0072007900200053006c00690064006500720000000000ffffffff0000000000000000fb0000001c004d006f00740069006f006e0050006c0061006e006e0069006e0067010000032c000002170000000000000000fb000000280020002d0020005400720061006a006500630074006f0072007900200053006c00690064006500720000000000ffffffff0000000000000000fb0000003c005400720061006a006500630074006f007200790020002d0020005400720061006a006500630074006f0072007900200053006c00690064006500720000000000ffffffff0000004100ffffff00000001000001100000039efc0200000003fb0000001e0054006f006f006c002000500072006f00700065007200740069006500730100000041000000780000000000000000fb0000000a00560069006500770073010000003d0000039e000000a400fffffffb0000001200530065006c0065006300740069006f006e010000025a000000b200000000000000000000000200000490000000a9fc0100000001fb0000000a00560069006500770073030000004e00000080000002e10000019700000003000004420000003efc0100000002fb0000000800540069006d00650100000000000004420000000000000000fb0000000800540069006d006501000000000000045000000000000000000000042b0000039e00000004000000040000000800000008fc0000000100000002000000010000000a0054006f006f006c00730100000000ffffffff0000000000000000
Selection:
collapsed: false
Tool Properties:
collapsed: false
Trajectory - Trajectory Slider:
collapsed: false
Views:
collapsed: false
Width: 1850
X: 70
Y: 27

View file

@@ -192,7 +192,7 @@ def generate_launch_description():
)
rviz_config_file = PathJoinSubstitution(
[FindPackageShare(moveit_config_package), "rviz", "view_robot.rviz"]
[FindPackageShare("rbs_bringup"), "config", "fork_view.rviz"]
)
mujoco_model = PathJoinSubstitution(

View file

@@ -19,6 +19,8 @@ find_package(rbs_skill_interfaces REQUIRED)
find_package(behavior_tree REQUIRED)
find_package(control_msgs REQUIRED)
find_package(component_interfaces REQUIRED)
find_package(lifecycle_msgs REQUIRED)
find_package(rcl_interfaces REQUIRED)
if (NOT CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD 17)
@@ -39,6 +41,8 @@ set(dependencies
std_msgs
control_msgs
component_interfaces
lifecycle_msgs
rcl_interfaces
)
include_directories(include)
@@ -61,8 +65,8 @@ list(APPEND plugin_libs rbs_add_planning_scene_object)
add_library(rbs_assemble_process_state SHARED src/AssembleProcessState.cpp)
list(APPEND plugin_libs rbs_assemble_process_state)
add_library(rbs_detect_object SHARED src/DetectObject.cpp)
list(APPEND plugin_libs rbs_detect_object)
add_library(rbs_pose_estimation SHARED src/PoseEstimation.cpp)
list(APPEND plugin_libs rbs_pose_estimation)
foreach(bt_plugin ${plugin_libs})
ament_target_dependencies(${bt_plugin} ${dependencies})

View file

@@ -1,51 +0,0 @@
#include <string>
#include <behavior_tree/BtService.hpp>
#include <rclcpp/rclcpp.hpp>
#include <component_interfaces/srv/detect_object.hpp>
using DetectObjectSrv = component_interfaces::srv::DetectObject;
class DetectObject: public BtService<DetectObjectSrv>
{
public:
DetectObject(const std::string &name, const BT::NodeConfiguration &config)
: BtService<DetectObjectSrv>(name, config)
{
RCLCPP_INFO_STREAM(_node->get_logger(), "Start node.");
}
DetectObjectSrv::Request::SharedPtr populate_request()
{
auto request = std::make_shared<DetectObjectSrv::Request>();
auto req_kind = getInput<std::string>("ReqKind").value();
if (req_kind == "DETECT")
request->req_kind = 0;
else if (req_kind == "FOLLOW")
request->req_kind = 1;
else
request->req_kind = 2;
request->object_name = getInput<std::string>("ObjectName").value();
return request;
}
BT::NodeStatus handle_response(DetectObjectSrv::Response::SharedPtr response)
{
return response->call_status? BT::NodeStatus::SUCCESS: BT::NodeStatus::FAILURE;
}
static BT::PortsList providedPorts()
{
return providedBasicPorts({
BT::InputPort<std::string>("ReqKind"),
BT::InputPort<std::string>("ObjectName"),
});
}
};
#include "behaviortree_cpp_v3/bt_factory.h"
BT_REGISTER_NODES(factory)
{
factory.registerNodeType<DetectObject>("DetectObject");
}

View file

@@ -0,0 +1,132 @@
#include <ament_index_cpp/get_package_share_directory.hpp>
#include <behavior_tree/BtService.hpp>
#include <behaviortree_cpp_v3/basic_types.h>
#include <string>
#include "rcl_interfaces/msg/parameter.hpp"
#include "rcl_interfaces/srv/set_parameters.hpp"
#include "lifecycle_msgs/msg/transition.hpp"
#include "lifecycle_msgs/srv/change_state.hpp"
// #include <component_interfaces/srv/detect_object.hpp>
using PoseEstimationSrv = lifecycle_msgs::srv::ChangeState;
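// This BT node drives the pose estimation lifecycle node: it requests ChangeState
// transitions and, on "configure", first pushes the mesh path into the node's
// "mesh_path" parameter via the "/image_sub2/set_parameters" service.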
class PoseEstimation : public BtService<PoseEstimationSrv> {
public:
PoseEstimation(const std::string &name, const BT::NodeConfiguration &config)
: BtService<PoseEstimationSrv>(name, config) {
RCLCPP_INFO_STREAM(_node->get_logger(), "Start node.");
_set_params_client =
this->_node->create_client<rcl_interfaces::srv::SetParameters>(
"/image_sub2/set_parameters");
_model_name = getInput<std::string>("ObjectName").value();
}
PoseEstimationSrv::Request::SharedPtr populate_request() {
auto request = std::make_shared<PoseEstimationSrv::Request>();
_req_type = getInput<std::string>("ReqKind").value();
request->set__transition(transition_event(_req_type));
return request;
}
BT::NodeStatus
handle_response(const PoseEstimationSrv::Response::SharedPtr response) {
if (response->success) {
return BT::NodeStatus::SUCCESS;
}
return BT::NodeStatus::FAILURE;
}
static BT::PortsList providedPorts() {
return providedBasicPorts({
BT::InputPort<std::string>("ReqKind"),
BT::InputPort<std::string>("ObjectName"),
BT::InputPort<std::string>("ObjectPath"),
});
}
private:
uint8_t transition_id_{};
std::string _model_name{};
std::string _model_path{};
std::string _req_type;
rclcpp::Client<rcl_interfaces::srv::SetParameters>::SharedPtr
_set_params_client;
std::vector<rcl_interfaces::msg::Parameter> _params;
rcl_interfaces::msg::Parameter _param;
lifecycle_msgs::msg::Transition
transition_event(const std::string &req_type) {
lifecycle_msgs::msg::Transition translation{};
if (req_type == "configure") {
set_mesh_param();
transition_id_ = lifecycle_msgs::msg::Transition::TRANSITION_CONFIGURE;
} else if (req_type == "activate") {
transition_id_ = lifecycle_msgs::msg::Transition::TRANSITION_ACTIVATE;
} else if (req_type == "deactivate") {
transition_id_ = lifecycle_msgs::msg::Transition::TRANSITION_DEACTIVATE;
} else if (req_type == "cleanup") {
transition_id_ = lifecycle_msgs::msg::Transition::TRANSITION_CLEANUP;
}
translation.set__id(transition_id_);
return translation;
}
inline std::string build_model_path(const std::string &model_name,
const std::string &package_path) {
return package_path + "/config/" + model_name + ".ply";
}
inline std::string build_model_path(const std::string &model_path) {
return model_path;
}
void set_mesh_param() {
auto _package_name =
ament_index_cpp::get_package_share_directory("rbs_perception");
_model_path = build_model_path(_model_name, _package_name);
auto param_request =
std::make_shared<rcl_interfaces::srv::SetParameters::Request>();
rcl_interfaces::msg::ParameterValue val;
val.set__string_value(_model_path);
_param.name = "mesh_path";
_param.value = val;
_params.push_back(_param);
param_request->set__parameters(_params);
while (!_set_params_client->wait_for_service(1s)) {
if (!rclcpp::ok()) {
RCLCPP_ERROR(_node->get_logger(),
"Interrupted while waiting for the service. Exiting.");
break;
}
RCLCPP_INFO(_node->get_logger(),
"service not available, waiting again...");
}
auto response = send_request(_node, _set_params_client, param_request);
}
rcl_interfaces::srv::SetParameters::Response::SharedPtr send_request(
rclcpp::Node::SharedPtr node,
rclcpp::Client<rcl_interfaces::srv::SetParameters>::SharedPtr client,
rcl_interfaces::srv::SetParameters::Request::SharedPtr request) {
auto result = client->async_send_request(request);
// Wait for the result.
if (rclcpp::spin_until_future_complete(node, result) ==
rclcpp::FutureReturnCode::SUCCESS) {
return result.get();
} else {
return NULL;
}
}
};
#include "behaviortree_cpp_v3/bt_factory.h"
BT_REGISTER_NODES(factory) {
factory.registerNodeType<PoseEstimation>("PoseEstimation");
}

View file

@@ -35,6 +35,7 @@ install(PROGRAMS
scripts/detection_service.py
scripts/grasp_marker_publish.py
scripts/pose_estimation.py
scripts/pose_estimation_lifecycle.py
DESTINATION lib/${PROJECT_NAME}
)

View file

@@ -1,16 +1,53 @@
# How the object pose estimation node works
# 6D object pose estimation
The 6D object pose estimation node can operate in two ways. The first uses the ROS node as a service, the second as a [lifecycle node](https://design.ros2.org/articles/node_lifecycle.html).
A node with a managed lifecycle gives finer control over the state of the ROS system. This approach lets the system verify that all components have been created correctly before any component starts executing its task. It also makes it possible to restart or replace the node online.
Since object pose estimation demands significant computational resources, implementing it as a lifecycle node makes it possible to manage the resource load.
## Option 1. Object pose estimation service
The node is started with:
```bash
ros2 run rbs_perception pose_estimation.py
```
By default a node named 'image_sub2' is started. It creates a pose recognition service named 'detect6Dpose', which waits for a client request.
By default a node named `image_sub2` is started. It creates a pose recognition service named `detect6Dpose`, which waits for a client request.
To obtain the pose of a given object, the client sends the service a request with an ObjectInfo parameter as input:
- id - identifier,
- name - object name,
- mesh_path - path to the mesh file in *.ply format.
- `id` - identifier,
- `name` - object name,
- `mesh_path` - path to the mesh file in *.ply format.
When a request arrives, the 'detect6Dpose' service subscribes to Image messages from the '/ground_true/camera_node' camera and uses them to run the Megapose 6D pose estimation algorithm. Once the Megapose result is available, the service publishes a message with the pose (Quaternion) to the 'pose6D_[obj]' topic.
When a request arrives, the `detect6Dpose` service subscribes to Image messages from the `/ground_true/camera_node` camera and uses them to run the Megapose 6D pose estimation algorithm. Once the Megapose result is available, the service publishes a message with the pose (Quaternion) to the `pose6D_[obj]` topic.
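For reference, a hypothetical client call via the ros2 CLI. The interface `rbs_skill_interfaces/srv/DetectObject`, its `object` field, and the service name come from the node source; the exact request layout and the values below are assumptions:
```bash
# Hypothetical request; adjust to the actual DetectObject.srv definition.
ros2 service call /image_sub2/detect6Dpose rbs_skill_interfaces/srv/DetectObject \
  "{object: {id: 0, name: 'fork', mesh_path: '/path/to/fork.ply'}}"
```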
## Option 2. ROS lifecycle node for obtaining the 6D pose
The node is started with:
```bash
ros2 run rbs_perception pose_estimation_lifecycle.py
```
By default a node named `image_sub2` is started. Its settings are defined in the configuration file `config/pose_estimation_config.json`. Example:
```json
{
  "nodeName":"image_sub2",
  "topicImage":"/outer_rgbd_camera/image",
  "topicCameraInfo":"/outer_rgbd_camera/camera_info",
  "topicDepth":"/outer_rgbd_camera/depth_image",
  "topicSrv":"/image_sub2/detect6Dpose",
  "publishDelay": 3.3,
  "tf2_send_pose": 1
}
```
- `"nodeName"` - the node's name in the ROS context;
- `"topicImage"` - the topic on which images from the RGBD camera are published;
- `"topicCameraInfo"` - the topic on which camera information, such as calibration parameters and settings, is published;
- `"topicDepth"` - the topic for depth images from the RGBD camera; depth images provide the distance to objects in the scene;
- `"topicSrv"` - the topic on which the object's 6D pose is published after estimation;
- `"publishDelay"` - the delay (in seconds) before messages are published to the topics;
- `"tf2_send_pose"` - controls sending the pose (position and orientation) to tf2, which ROS uses to manage coordinate transforms; a value of 1 enables this feature (0 disables it).
The first stage of the node's operation is configuration via `on_configure()`; before calling it, the node parameter `"mesh_path"` must be set. This string parameter must contain the full path to the object's mesh model file in *.ply format.
If configuration succeeds, the node transitions to the `'inactive'` state. The node then has to be activated by calling `on_activate()`. This activates the subscriber to camera images (`"topicImage"`); in its callback the object's pose is estimated (megapose) and published to the `"topicSrv"` topic.
To switch recognition off, trigger the `on_deactivate()` event.
To reconfigure the node when the pose of a different object has to be estimated, trigger the `on_cleanup()` event and then repeat the configure and activate procedure, as sketched below.
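The full cycle, as a sketch using the standard ros2 CLI; the node name `/image_sub2` and the mesh path are assumptions:
```bash
# Set the mesh model first, then drive the node through its lifecycle.
ros2 param set /image_sub2 mesh_path /path/to/fork.ply
ros2 lifecycle set /image_sub2 configure   # -> inactive: subscribers created, Megapose preloaded
ros2 lifecycle set /image_sub2 activate    # -> active: poses are estimated and published
ros2 lifecycle set /image_sub2 deactivate  # stop recognition
ros2 lifecycle set /image_sub2 cleanup     # -> unconfigured: ready for a new object
```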
### Important note
---

View file

@@ -1,5 +1,5 @@
{
"topicName":"image_sub",
"topicImage":"/ground_true/camera_node",
"topicImage":"/outer_rgbd_camera/image",
"weightsFile":"yolov4_objs2_final.weights"
}

rbs_perception/config/fork.ply (new file, 220438 lines)

File diff suppressed because it is too large

View file

@@ -1,5 +1,14 @@
{
"topicName":"image_sub2",
"topicImage":"/ground_true/camera_node",
"weightsFile":"yolov4_objs2_final.weights"
"nodeName":"image_sub2",
"topicImage":"/outer_rgbd_camera/image",
"topicCameraInfo":"/outer_rgbd_camera/camera_info",
"topicDepth":"/outer_rgbd_camera/depth_image",
"publishDelay": 3.3,
"tf2_send_pose": 1,
"camera_info": {
"fx": 924.855,
"fy": 923.076,
"width": 640,
"height": 480
}
}

View file

@@ -3,14 +3,15 @@
detection_service
ROS 2 program for 6D Pose Estimation
@shalenikol release 0.1
@shalenikol release 0.2
"""
# Import the necessary libraries
import rclpy # Python library for ROS 2
from rclpy.node import Node # Handles the creation of nodes
from sensor_msgs.msg import Image # Image is the message type
from geometry_msgs.msg import Quaternion
from geometry_msgs.msg import Quaternion, TransformStamped
from tf2_ros import TransformBroadcaster
from cv_bridge import CvBridge # Package to convert between ROS and OpenCV Images
import cv2 # OpenCV library
from rbs_skill_interfaces.srv import DetectObject
@@ -23,9 +24,11 @@ import tempfile
from pathlib import Path
import numpy as np
from ament_index_python.packages import get_package_share_directory
import megapose
# import megapose
from megapose.scripts.run_inference_on_example import run_inference
tf2_send_pose = True
"""
# encoder for numpy array
def np_encoder(object):
@@ -44,49 +47,51 @@ class ImageSubscriber(Node):
y = json.load(f)
for name, val in y.items():
if name == "topicName":
self.topicName = val
if name == "nodeName":
self.nodeName = val
elif name == "topicImage":
self.topicImage = val
elif name == "topicPubName":
self.topicPubName = val
elif name == "topicSrv":
self.topicSrv = val
elif name == "tf2_send_pose":
self.tf2_send_pose = val
elif name == "camera_info":
self.K_, self.res_ = self._getCameraParam(val)
def _getCameraParam(self):
# {"K": [[924.855, 0.0, 320.0], [0.0, 923.076, 240.0], [0.0, 0.0, 1.0]], "resolution": [480, 640]}
def _getCameraParam(self, info):
"""
focal length: 25 mm (field of view: 38.1712°)
sensor type: Micro Four Thirds System (MFT Micro 4/3 standard)
sensor_width: 17.3 mm
sensor_height: 13 mm
Returns the intrinsic matrix and resolution from the provided camera info.
"""
resolution = [480, 640] #np.array([480, 640])
intrinsic_matrix = [ #np.array([
[924.855, 0.0, resolution[1] / 2.0],
[0.0, 923.076, resolution[0] / 2.0],
[0.0, 0.0, 1.0]
] #)
intrinsic_matrix = [
[info["fx"], 0.0, info["width"] / 2.0],
[0.0, info["fy"], info["height"] / 2.0],
[0.0, 0.0, 1.0]
]
resolution = [info["height"], info["width"]]
return intrinsic_matrix, resolution
def __init__(self):
"""
Class constructor to set up the node
"""
self.topicName = "image_sub2"
self.topicImage = "/ground_true/camera_node"
self.topicPubName = "pose6D_images"
self.topicSrv = "detect6Dpose"
self.nodeName = "image_sub2"
self.topicImage = "/outer_rgbd_camera/image"
self.topicPubName = self.nodeName + "/pose6D_images"
self.topicSrv = self.nodeName + "/detect6Dpose"
self._InitService()
self.tmpdir = tempfile.gettempdir()
self.mytemppath = Path(self.tmpdir) / "rbs_per"
self.mytemppath.mkdir(exist_ok=True)
self.K_, self.res_ = self._getCameraParam()
#os.environ["MEGAPOSE_DATA_DIR"] = str(self.mytemppath)
# Initiate the Node class's constructor and give it a name
super().__init__(self.topicName)
super().__init__(self.nodeName)
# Initialize the transform broadcaster
self.tf_broadcaster = TransformBroadcaster(self)
self.subscription = None
self.objName = ""
@@ -101,14 +106,15 @@
self.service = self.create_service(DetectObject, self.topicSrv, self.service_callback)
def service_callback(self, request, response):
self.get_logger().info(f"Incoming request for object detection ObjectInfo(name: {request.object.name}, mesh_path: {request.object.mesh_path})")
self.get_logger().info(f"Incoming request for pose estimation ObjectInfo(name: {request.object.name}, mesh_path: {request.object.mesh_path})")
if not os.path.isfile(request.object.mesh_path):
response.call_status = False
response.error_msg = f"{request.object.mesh_path}: no such file"
return response
if request.object.id == -1:
self.subscription = None # ? reset subscription
self.subscription = None # ? reset subscription  python -m megapose.scripts.download --example_data
response.call_status = True
return response
@@ -126,7 +132,7 @@
output_fn = tPath / "object_data.json"
output_json_dict = {
"label": self.objName,
"bbox_modal": [2,2,self.res_[1]-1,self.res_[0]-1]
"bbox_modal": [2,2,self.res_[1]-4,self.res_[0]-4]
}
data = []
data.append(output_json_dict)
@@ -146,7 +152,7 @@
}
data = []
data.append(output_json_dict)
output_fn.write_text(json.dumps(data))
output_fn.write_text(json.dumps(output_json_dict))
# Create the subscriber. This subscriber will receive an Image from the video_frames topic. The queue size is 3 messages.
self.subscription = self.create_subscription(Image, self.topicImage, self.listener_callback, 3)
@@ -166,6 +172,29 @@
data = "No result file: '" + str(f) + "'"
return data
def tf_obj_pose(self,pose):
"""
Publish the object's pose to tf2
"""
t = TransformStamped()
# assign pose to corresponding tf variables
t.header.stamp = self.get_clock().now().to_msg()
t.header.frame_id = 'world'
t.child_frame_id = self.objName
# coordinates
tr = pose[1]
t.transform.translation.x = tr[0]
t.transform.translation.y = tr[1]
t.transform.translation.z = tr[2]
# rotation
q = pose[0]
t.transform.rotation.x = q[1] # 0
t.transform.rotation.y = q[2] # 1
t.transform.rotation.z = q[3] # 2
t.transform.rotation.w = q[0] # 3
# Send the transformation
self.tf_broadcaster.sendTransform(t)
def listener_callback(self, data):
"""
Callback function.
@@ -182,16 +211,19 @@
# 6D pose estimation
self.get_logger().info(f"megapose: begin {self.cnt}")
print(self.objPath)
run_inference(self.objPath,"megapose-1.0-RGB-multi-hypothesis")
# publish the pose estimation result
data = self.load_result(self.objPath)
if data[0] == "[":
y = json.loads(data)[0]
pose = y['TWO']
pose = y["TWO"]
quat = pose[0]
#pose[1] - 3D translation
self.publisher.publish(Quaternion(x=quat[1],y=quat[2],z=quat[3],w=quat[0]))
if tf2_send_pose:
self.tf_obj_pose(pose)
self.get_logger().info(f"megapose: end {self.cnt}")

View file

@@ -0,0 +1,342 @@
#!/usr/bin/env python3
"""
pose_estimation_lifecycle_node
ROS 2 program for 6D Pose Estimation
@shalenikol release 0.3
"""
from typing import Optional
import os
import shutil
import json
import tempfile
from pathlib import Path
import rclpy
from rclpy.lifecycle import Node
from rclpy.lifecycle import Publisher
from rclpy.lifecycle import State
from rclpy.lifecycle import TransitionCallbackReturn
from rclpy.timer import Timer
from ament_index_python.packages import get_package_share_directory
from sensor_msgs.msg import Image, CameraInfo
from geometry_msgs.msg import Pose, TransformStamped
from tf2_ros import TransformBroadcaster
from cv_bridge import CvBridge # Package to convert between ROS and OpenCV Images
import cv2 # OpenCV library
from megapose.scripts.run_inference_on_example import ModelPreload, run_inference_rbs #, run_inference
class PoseEstimator(Node):
"""Our lifecycle node."""
def _InitService(self):
# Initialization service data
p = os.path.join(get_package_share_directory("rbs_perception"), "config", "pose_estimation_config.json")
# load config
with open(p, "r") as f:
y = json.load(f)
for name, val in y.items():
if name == "nodeName":
self.nodeName = val
elif name == "topicImage":
self.topicImage = val
elif name == "topicCameraInfo":
self.topicCameraInfo = val
elif name == "topicDepth":
self.topicDepth = val
elif name == "publishDelay":
self.publishDelay = val
elif name == "topicSrv":
self.topicSrv = val
elif name == "tf2_send_pose":
self.tf2_send_pose = val
def __init__(self, node_name, **kwargs):
"""Construct the node."""
self._count: int = 0
self._pub: Optional[Publisher] = None
self._timer: Optional[Timer] = None
self._image_cnt: int = 0
self._sub = None
self._sub_info = None
self._sub_depth = None
self._is_camerainfo = False
self._K = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
self._res = [0, 0]
self._pose = [[1., 0., 0., 0.], [0., 0., 0.]]
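# _pose layout: [quaternion (w, x, y, z), translation (x, y, z)] - the Megapose "TWO" result format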
self.tf2_send_pose = 0
self.megapose_model = None
self.nodeName = node_name #"image_sub2"
self.topicImage = "/outer_rgb_camera/image"
self.topicCameraInfo = "/outer_rgb_camera/camera_info"
self.topicDepth = "/outer_rgbd_camera/depth_image"
self.publishDelay = 2.0
self.topicSrv = self.nodeName + "/detect6Dpose"
self._InitService()
self.tmpdir = tempfile.gettempdir()
self.mytemppath = Path(self.tmpdir) / "rbs_per"
self.mytemppath.mkdir(exist_ok=True)
super().__init__(self.nodeName, **kwargs)
self.declare_parameter("mesh_path", "")
# Initialize the transform broadcaster
self.tf_broadcaster = TransformBroadcaster(self)
# Used to convert between ROS and OpenCV images
self.br = CvBridge()
self.objName = ""
self.objMeshFile = ""
self.objPath = ""
def publish(self):
"""Publish a new message when enabled."""
self._count += 1
if self._pub is not None and self._pub.is_activated:
# publish the pose estimation result
q = self._pose[0]
t = self._pose[1]
p = Pose()
p.position.x = t[0]
p.position.y = t[1]
p.position.z = t[2]
p.orientation.w = q[0]
p.orientation.x = q[1]
p.orientation.y = q[2]
p.orientation.z = q[3]
self._pub.publish(p)
if self.tf2_send_pose:
self.tf_obj_pose(self._pose)
def tf_obj_pose(self,pose):
"""
Publish the object's pose to tf2
"""
t = TransformStamped()
# assign pose to corresponding tf variables
t.header.stamp = self.get_clock().now().to_msg()
t.header.frame_id = 'world'
t.child_frame_id = self.objName
# coordinates
tr = pose[1]
t.transform.translation.x = tr[0]
t.transform.translation.y = tr[1]
t.transform.translation.z = tr[2]
# rotation
q = pose[0]
t.transform.rotation.x = q[1] # 0
t.transform.rotation.y = q[2] # 1
t.transform.rotation.z = q[3] # 2
t.transform.rotation.w = q[0] # 3
# Send the transformation
self.tf_broadcaster.sendTransform(t)
def on_configure(self, state: State) -> TransitionCallbackReturn:
"""
Configure the node, after a configuring transition is requested.
:return: The state machine either invokes a transition to the "inactive" state or stays
in "unconfigured" depending on the return value.
TransitionCallbackReturn.SUCCESS transitions to "inactive".
TransitionCallbackReturn.FAILURE transitions to "unconfigured".
TransitionCallbackReturn.ERROR or any uncaught exceptions to "errorprocessing"
"""
mesh_path = self.get_parameter("mesh_path").get_parameter_value().string_value
if not os.path.isfile(mesh_path):
self.get_logger().info("Parameter 'mesh_path' not set")
return TransitionCallbackReturn.FAILURE
data = os.path.basename(mesh_path)
self.objName = os.path.splitext(data)[0]
self.objMeshFile = mesh_path
self.objPath = self.mytemppath / "examples"
self.objPath.mkdir(exist_ok=True)
self.objPath /= self.objName
self.objPath.mkdir(exist_ok=True)
tPath = self.objPath / "inputs"
tPath.mkdir(exist_ok=True)
tPath = self.objPath / "meshes"
tPath.mkdir(exist_ok=True)
tPath /= self.objName
tPath.mkdir(exist_ok=True)
shutil.copyfile(self.objMeshFile, str(tPath / (self.objName+".ply")))
# Create the subscribers.
self._sub_info = self.create_subscription(CameraInfo, self.topicCameraInfo, self.listener_camera_info, 2)
self._sub_depth = self.create_subscription(Image, self.topicDepth, self.listener_depth, 3)
# Create the publisher.
self._pub = self.create_lifecycle_publisher(Pose, self.topicSrv, 10)
self._timer = self.create_timer(self.publishDelay, self.publish)
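# publishDelay from the config is the period (in seconds) of the timer that publishes the latest estimated pose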
# Preload Megapose model
self.megapose_model = ModelPreload(self.objPath,"megapose-1.0-RGB-multi-hypothesis")
self.get_logger().info('on_configure() is called.')
return TransitionCallbackReturn.SUCCESS
def on_activate(self, state: State) -> TransitionCallbackReturn:
# Log
self.get_logger().info('on_activate() is called.')
# Create the main subscriber.
self._sub = self.create_subscription(Image, self.topicImage, self.listener_callback, 3)
return super().on_activate(state)
def on_deactivate(self, state: State) -> TransitionCallbackReturn:
# Log
self.get_logger().info('on_deactivate() is called.')
# Destroy the main subscriber.
self.destroy_subscription(self._sub)
return super().on_deactivate(state)
def on_cleanup(self, state: State) -> TransitionCallbackReturn:
"""
Cleanup the node.
:return: The state machine either invokes a transition to the "unconfigured" state or stays
in "inactive" depending on the return value.
TransitionCallbackReturn.SUCCESS transitions to "unconfigured".
TransitionCallbackReturn.FAILURE transitions to "inactive".
TransitionCallbackReturn.ERROR or any uncaught exceptions to "errorprocessing"
"""
# clear the parameters
node_param = rclpy.parameter.Parameter("mesh_path", rclpy.Parameter.Type.STRING, "")
all_node_param = [node_param]
self.set_parameters(all_node_param)
self._is_camerainfo = False
self.destroy_timer(self._timer)
self.destroy_publisher(self._pub)
self.destroy_subscription(self._sub)
self.destroy_subscription(self._sub_info)
self.destroy_subscription(self._sub_depth)
self.get_logger().info('on_cleanup() is called.')
return TransitionCallbackReturn.SUCCESS
def on_shutdown(self, state: State) -> TransitionCallbackReturn:
"""
Shutdown the node.
:return: The state machine either invokes a transition to the "finalized" state or stays
in the current state depending on the return value.
TransitionCallbackReturn.SUCCESS transitions to "unconfigured".
TransitionCallbackReturn.FAILURE transitions to "inactive".
TransitionCallbackReturn.ERROR or any uncaught exceptions to "errorprocessing"
"""
self.destroy_timer(self._timer)
self.destroy_publisher(self._pub)
self.destroy_subscription(self._sub)
self.destroy_subscription(self._sub_info)
self.destroy_subscription(self._sub_depth)
self.get_logger().info('on_shutdown() is called.')
return TransitionCallbackReturn.SUCCESS
def listener_camera_info(self, data):
"""
CameraInfo callback function.
"""
if self._is_camerainfo: # camera info already received; don't read it again
return
self._res = [data.height, data.width]
k_ = data.k
self._K = [
[k_[0], k_[1], k_[2]],
[k_[3], k_[4], k_[5]],
[k_[6], k_[7], k_[8]]
]
tPath = self.objPath / "inputs"
#{"label": "fork", "bbox_modal": [329, 189, 430, 270]}
output_fn = tPath / "object_data.json"
output_json_dict = {
"label": self.objName,
"bbox_modal": [2,2,self._res[1]-4,self._res[0]-4]
}
data = []
data.append(output_json_dict)
output_fn.write_text(json.dumps(data))
#{"K": [[25.0, 0.0, 8.65], [0.0, 25.0, 6.5], [0.0, 0.0, 1.0]], "resolution": [480, 640]}
output_fn = self.objPath / "camera_data.json"
output_json_dict = {
"K": self._K,
"resolution": self._res
}
data = []
data.append(output_json_dict)
output_fn.write_text(json.dumps(output_json_dict))
# set the flag that camera info has been received
self._is_camerainfo = True
def listener_depth(self, data):
"""
Depth image callback function.
"""
#self.get_logger().info("Receiving depth image")
# Convert ROS Image message to OpenCV image
current_frame = self.br.imgmsg_to_cv2(data)
# Save depth image for Megapose
cv2.imwrite(str(self.objPath / "image_depth.png"), current_frame)
def load_result(self, example_dir: Path, json_name = "object_data.json"):
f = example_dir / "outputs" / json_name
if os.path.isfile(f):
data = f.read_text()
else:
data = "No result file: '" + str(f) + "'"
return data
def listener_callback(self, data):
"""
Image Callback function.
"""
if not self._is_camerainfo:
self.get_logger().warning("No data from CameraInfo")
return
# Convert ROS Image message to OpenCV image
current_frame = self.br.imgmsg_to_cv2(data)
# Save image for Megapose
cv2.imwrite(str(self.objPath / "image_rgb.png"), current_frame)
self._image_cnt += 1
# 6D pose estimation
self.get_logger().info(f"megapose: begin {self._image_cnt} {self.objPath}")
#run_inference(self.objPath,"megapose-1.0-RGB-multi-hypothesis")
run_inference_rbs(self.megapose_model)
data = self.load_result(self.objPath)
if data[0] == "[":
y = json.loads(data)[0]
self._pose = y["TWO"]
self.get_logger().info(f"megapose: end {self._image_cnt}")
def main():
rclpy.init()
executor = rclpy.executors.SingleThreadedExecutor()
lc_node = PoseEstimator("lc_pose_estimator")
executor.add_node(lc_node)
try:
executor.spin()
except (KeyboardInterrupt, rclpy.executors.ExternalShutdownException):
lc_node.destroy_node()
if __name__ == '__main__':
main()

Binary file not shown.

File diff suppressed because one or more lines are too long

View file

@@ -0,0 +1,14 @@
<?xml version="1.0"?>
<model>
<name>fork</name>
<sdf version="1.9">model.sdf</sdf>
<author>
<name>bill-finger</name>
<email>ur.narmak@gmail.com</email>
</author>
<description>
The fork link of rbs_arm
</description>
</model>

View file

@@ -0,0 +1,58 @@
<?xml version="1.0" ?>
<sdf version="1.9">
<model name="fork">
<pose>0 0 0.015 0 0 0</pose>
<link name="link">
<inertial>
<inertia>
<ixx>0.00004167</ixx>
<ixy>0.0</ixy>
<ixz>0.0</ixz>
<iyy>0.00004167</iyy>
<iyz>0.0</iyz>
<izz>0.00004167</izz>
</inertia>
<mass>0.1</mass>
</inertial>
<!-- <gravity>0</gravity> -->
<collision name="collision">
<geometry>
<mesh>
<uri>meshes/collision/fork.stl</uri>
</mesh>
</geometry>
<surface>
<contact>
<ode/>
</contact>
<bounce/>
<friction>
<torsional>
<ode>
<slip>
0.3
</slip>
</ode>
</torsional>
<ode>
<mu>0.2</mu>
<mu2>0.2</mu2>
</ode>
</friction>
</surface>
</collision>
<visual name="visual">
<geometry>
<mesh>
<uri>meshes/visual/fork.dae</uri>
</mesh>
</geometry>
<!-- <material>
<ambient>0.8 0.8 0.8 1</ambient>
<diffuse>0.8 0.8 0.8 1</diffuse>
<specular>0.8 0.8 0.8 1</specular>
</material> -->
</visual>
</link>
</model>
</sdf>

View file

@@ -123,6 +123,11 @@
<uri>model://box1</uri>
<name>box6</name>
<pose>-0.25 0.75 0.025 0 0 0</pose>
</include>
</include>
<include>
<uri>model://fork</uri>
<name>fork_gt</name>
<pose>-0.5 0 0.25 0 0 0</pose>
</include>
</world>
</sdf>