rbs_executor with bt_file (param)

shalenikol 2023-12-29 22:55:27 +03:00
parent d1bddab437
commit a5f3f6bc2e
7 changed files with 94 additions and 106 deletions


@@ -1,16 +1,17 @@
<?xml version="1.0"?>
<root main_tree_to_execute="PoseEstimation">
-<BehaviorTree ID="PoseEstimation">
-<Sequence>
-<Action ID="PoseEstimation"
+<BehaviorTree ID="PoseEstimation">
+<Sequence>
+<Action ID="PoseEstimation"
ObjectName="!/home/shalenikol/robossembler_ws/src/robossembler-ros2/rbs_perception/config/str_param.json"
ReqKind = "calibrate"
server_name="/pose_estimation/change_state"
server_timeout="1000"/>
-<Action ID="PoseEstimation"
+<Action ID="PoseEstimation"
ObjectName=""
ReqKind = "activate"
server_name="/pose_estimation/change_state"
server_timeout="1000"/>
-</Sequence>
-</BehaviorTree>
+</Sequence>
+</BehaviorTree>
</root>


@@ -0,0 +1,17 @@
+<?xml version="1.0"?>
+<root main_tree_to_execute="PoseEstimation">
+<BehaviorTree ID="PoseEstimation">
+<Sequence>
+<Action ID="PoseEstimation"
+ObjectName=""
+ReqKind = "deactivate"
+server_name="/pose_estimation/change_state"
+server_timeout="1000"/>
+<Action ID="PoseEstimation"
+ObjectName=""
+ReqKind = "cleanup"
+server_name="/pose_estimation/change_state"
+server_timeout="1000"/>
+</Sequence>
+</BehaviorTree>
+</root>
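
Together, these two trees drive the pose estimation node around the standard ROS 2 lifecycle: "calibrate" (a node-specific request that carries the str_param.json path in ObjectName, apparently standing in for configure), "activate", then "deactivate" and "cleanup". Each PoseEstimation action boils down to a lifecycle_msgs/srv/ChangeState call on /pose_estimation/change_state. Below is a minimal rclpy sketch of that call for the standard transitions; the actual BT client lives in the behavior_tree package and is not part of this commit.

import rclpy
from rclpy.node import Node
from lifecycle_msgs.msg import Transition
from lifecycle_msgs.srv import ChangeState

# ReqKind values from the trees, mapped to standard lifecycle transition ids.
# "calibrate" is node-specific and deliberately left out of this sketch.
TRANSITIONS = {
    "configure": Transition.TRANSITION_CONFIGURE,
    "activate": Transition.TRANSITION_ACTIVATE,
    "deactivate": Transition.TRANSITION_DEACTIVATE,
    "cleanup": Transition.TRANSITION_CLEANUP,
}

def change_state(node: Node, req_kind: str, timeout_sec: float = 1.0) -> bool:
    """Request one lifecycle transition from the pose estimation node."""
    client = node.create_client(ChangeState, "/pose_estimation/change_state")
    if not client.wait_for_service(timeout_sec=timeout_sec):
        return False
    request = ChangeState.Request()
    request.transition.id = TRANSITIONS[req_kind]
    future = client.call_async(request)
    rclpy.spin_until_future_complete(node, future)
    return future.result() is not None and future.result().success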


@@ -1,29 +1,22 @@
from launch import LaunchDescription
+from launch.actions import DeclareLaunchArgument
from launch_ros.actions import Node
from launch.substitutions import Command, FindExecutable, LaunchConfiguration, PathJoinSubstitution
from launch_ros.substitutions import FindPackageShare
-from ament_index_python.packages import get_package_share_directory
-import os
-import yaml
-import math
from rbs_launch_utils.launch_common import load_yaml
+# from ament_index_python.packages import get_package_share_directory
+# import os
def generate_launch_description():
-    # bt_config = os.path.join(
-    # get_package_share_directory('rbs_bt_executor'),
-    # 'config',
-    # 'params.yaml'
-    # )
-    # points = load_yaml(
-    # package_name="rbs_bt_executor",
-    # file_path="config/points.yaml"
-    # )
    gripperPoints = load_yaml(
        package_name="rbs_bt_executor",
        file_path="config/gripperPositions.yaml")
+    declared_arguments = []
+    declared_arguments.append(
+        DeclareLaunchArgument(
+            "bt_file",
+            default_value="test_tree_env_manager.xml",
+            description="BehaviorTree file for run.",
+        )
+    )
+    bt_file = LaunchConfiguration("bt_file")
    robot_description_semantic_content = Command(
        [
@@ -42,16 +35,20 @@ def generate_launch_description():
        ]
    )
    robot_description_semantic = {"robot_description_semantic": robot_description_semantic_content}
-    return LaunchDescription([
+    btfile_description = PathJoinSubstitution(
+        [FindPackageShare("rbs_bt_executor"), "bt_trees", bt_file]
+    )
+    btfile_param = {"bt_file_path": btfile_description}
+    nodes_to_start = [
        Node(
            package='behavior_tree',
            executable='bt_engine',
            #prefix=['xterm -e gdb -ex run --args'],
            parameters=[
-                {'bt_file_path': os.path.join(get_package_share_directory('rbs_bt_executor'), 'bt_trees/test_tree_env_manager.xml')},
+                btfile_param,
+                #{'bt_file_path': os.path.join(get_package_share_directory('rbs_bt_executor'), 'bt_trees/' + bt_file)},
                {'plugins':["rbs_skill_move_topose_bt_action_client",
                    "rbs_skill_get_pick_place_pose_service_client",
                    "rbs_skill_gripper_move_bt_action_client",
@@ -64,4 +61,6 @@
                robot_description_semantic
            ]
        )
-    ])
+    ]
+    return LaunchDescription(declared_arguments + nodes_to_start)
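
With this change the tree to execute is a launch argument instead of a hard-coded path: bt_file defaults to test_tree_env_manager.xml and is resolved against the package's installed bt_trees directory via PathJoinSubstitution, so the XML only has to ship with the package. Assuming the launch file shown here (its file name is not part of the hunk), a different tree is selected like:

ros2 launch rbs_bt_executor <this_launch_file>.launch.py bt_file:=my_tree.xml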


@@ -1,16 +1,10 @@
{
"nodeName":"pose_estimation",
-"cameraLink":"outer_rgbd_camera",
-"topicImage":"/outer_rgbd_camera/image",
-"topicCameraInfo":"/outer_rgbd_camera/camera_info",
-"topicDepth":"/outer_rgbd_camera/depth_image",
-"publishDelay": 3.3,
+"cameraLink":"inner_rgbd_camera",
+"topicImage":"/inner_rgbd_camera/image",
+"topicCameraInfo":"/inner_rgbd_camera/camera_info",
+"topicDepth":"/inner_rgbd_camera/depth_image",
+"publishDelay": 1.3,
"tf2_send_pose": 1,
-"mesh_scale": 0.001,
-"camera_info": {
-"fx": 924.855,
-"fy": 923.076,
-"width": 640,
-"height": 480
-}
+"mesh_scale": 0.001
}
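
Besides switching from the outer to the inner RGBD camera and shortening publishDelay, the hard-coded camera_info block (fx/fy, width/height) is dropped, so intrinsics now have to come from the topicCameraInfo subscription; the pose_estimation_lifecycle.py hunks below do assemble the K matrix from an incoming message. A generic sketch of that idea (the node applies its own extra scaling, which this deliberately omits):

from sensor_msgs.msg import CameraInfo

# Illustrative only: CameraInfo.k is the row-major flattened 3x3 intrinsic
# matrix, so a plain reshape recovers K.
def k_matrix(msg: CameraInfo) -> list:
    k = msg.k
    return [[k[0], k[1], k[2]],
            [k[3], k[4], k[5]],
            [k[6], k[7], k[8]]]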


@@ -1,5 +1,5 @@
{
-"mesh_path":"/home/shalenikol/robossembler_ws/src/robossembler-ros2/rbs_perception/config/fork.ply",
+"mesh_path":"/home/shalenikol/assembly/asp-example2/sdf_generation/bishop/meshes/bishop.ply",
"gtpose_0":[1.3,0.0,0.0,0.0,0.0,0.0],
-"darknet_path_0":"/home/shalenikol/test_detection"
+"darknet_path":"/home/shalenikol/test_detection"
}


@@ -1,29 +1,15 @@
import os
from launch_ros.actions import Node
from ur_moveit_config.launch_common import load_yaml
from launch import LaunchDescription
def generate_launch_description():
    declared_arguments = []
    points_params = load_yaml("rbs_bt_executor", "config/gripperPositions.yaml")
    grasp_pose_loader = Node(
        package="rbs_skill_servers",
        executable="pick_place_pose_loader_service_server",
        output="screen",
        emulate_tty=True,
        parameters=[
            points_params
        ]
    )
    pose_estimation = Node(
        package="rbs_perception",
        executable="pose_estimation_lifecycle.py",
    )
    nodes_to_start = [
        grasp_pose_loader,
        pose_estimation
    ]
    return LaunchDescription(declared_arguments + nodes_to_start)
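
This bringup now starts only two nodes: the pick/place pose loader, parameterized from gripperPositions.yaml, and the pose estimation lifecycle node. The latter sits in the unconfigured state until a tree such as the ones above drives it through its transitions; assuming the node comes up under the nodeName from str_param.json, its state can be checked with:

ros2 lifecycle get /pose_estimation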


@@ -89,7 +89,7 @@ class PoseEstimator(Node):
        self.publishDelay = 2.0
        self.topicSrv = self.nodeName + "/detect6Dpose"
        self._InitService()
-        self.camera_pose = Pose() #self.get_camera_pose()
+        self.camera_pose = Pose()
        self.tmpdir = tempfile.gettempdir()
        self.mytemppath = Path(self.tmpdir) / "rbs_per"
@@ -111,21 +111,6 @@
        self.objName = ""
        self.objMeshFile = ""
        self.objPath = ""
-    # def get_camera_pose(self) -> Pose:
-    # # <camera_pose>3.3 2.8 2.8 0 0.5 -2.4</camera_pose>
-    # p = Pose()
-    # p.position.x = -2.
-    # p.position.y = -0.55
-    # p.position.z = 1.44
-    # #R = rpyToMatrix([0, 0.5, -2.4])
-    # #q = pin.Quaternion()
-    # #q = t3d.euler.euler2quat(0., 0.5, -2.4)
-    # p.orientation.w = 0.9524 #q[0]
-    # p.orientation.x = -0.0476 #q[1]
-    # p.orientation.y = 0.213 #q[2]
-    # p.orientation.z = 0.213 #q[3]
-    # return p
    def publish(self):
        """Publish a new message when enabled."""
@@ -137,11 +122,11 @@
        t = self._pose[1]
        # transform from View space in World space
        # rotate
-        q_mir = t3d.quaternions.qmult(self.quat_rotate_xyz, q)
+        #q_mir = t3d.quaternions.qmult(self.quat_rotate_xyz, q)
        # translate (xyz = +z-x-y)
-        t_mir = [t[2], -t[0], -t[1]]
+        #t_mir = [t[2], -t[0], -t[1]]
-        cam_p = self.camera_pose.position
+        #cam_p = self.camera_pose.position
        #cam_Q = self.camera_pose.orientation
        # q_rot = [msgQ.w, msgQ.x, msgQ.y, msgQ.z]
        # q = t3d.quaternions.qinverse(q) #q[0] = -q[0]
@@ -150,17 +135,24 @@
        #q_mir = q
        p = Pose()
-        p.position.x = t_mir[0] + cam_p.x
-        p.position.y = t_mir[1] + cam_p.y
-        p.position.z = t_mir[2] + cam_p.z
-        p.orientation.w = q_mir[0]
-        p.orientation.x = q_mir[1]
-        p.orientation.y = q_mir[2]
-        p.orientation.z = q_mir[3]
+        p.position.x = t[0]
+        p.position.y = t[1]
+        p.position.z = t[2]
+        p.orientation.w = q[0]
+        p.orientation.x = q[1]
+        p.orientation.y = q[2]
+        p.orientation.z = q[3]
+        # p.position.x = t_mir[0] + cam_p.x
+        # p.position.y = t_mir[1] + cam_p.y
+        # p.position.z = t_mir[2] + cam_p.z
+        # p.orientation.w = q_mir[0]
+        # p.orientation.x = q_mir[1]
+        # p.orientation.y = q_mir[2]
+        # p.orientation.z = q_mir[3]
        self._pub.publish(p)
        if self.tf2_send_pose:
-            self.tf_obj_pose(p.position,q_mir) #(self._pose)
+            self.tf_obj_pose(t,q) #(self._pose)
    def tf_obj_pose(self, tr, q):
        """
@@ -169,13 +161,13 @@
        t = TransformStamped()
        # assign pose to corresponding tf variables
        t.header.stamp = self.get_clock().now().to_msg()
-        t.header.frame_id = 'world'
-        t.child_frame_id = self.objName
+        t.header.frame_id = self.camera_link #'world'
+        t.child_frame_id = self.objName + "_pp"
        # coordinates
        #tr = pose[1]
-        t.transform.translation.x = tr.x #[0]
-        t.transform.translation.y = tr.y #[1]
-        t.transform.translation.z = tr.z #[2]
+        t.transform.translation.x = tr[0] #.x
+        t.transform.translation.y = tr[1] #.y
+        t.transform.translation.z = tr[2] #.z
        # rotation
        #q = pose[0]
        t.transform.rotation.w = q[0]
@@ -253,7 +245,7 @@
        else:
            self._pose = [t3d.euler.euler2quat(gtpose[3], gtpose[4], gtpose[5]), gtpose[:3]]
-        self.get_logger().info('on_configure() is called.')
+        self.get_logger().info("on_configure() is called.")
        return TransitionCallbackReturn.SUCCESS
    def on_activate(self, state: State) -> TransitionCallbackReturn:
@@ -334,22 +326,17 @@
            [k_[3], k_[4]*2.0, data.height / 2.0], # k_[5]], #
            [k_[6], k_[7], k_[8]] #self.mesh_scale]
        ]
-        # self._K = [
-        # [k_[0]*2.0, k_[1], data.width / 2.0], # k_[2]], #
-        # [k_[3], k_[4]*2.0, data.height / 2.0], # k_[5]], #
-        # [k_[6], k_[7], k_[8]] #self.mesh_scale]
-        # ]
        # *** for test
-        tPath = self.objPath / "inputs"
-        output_fn = tPath / "object_data.json"
-        output_json_dict = {
-            "label": self.objName,
-            "bbox_modal": [168,173,260,252] # 0.2 0 0 -> [288,170,392,253]
-        }
-        data = []
-        data.append(output_json_dict)
-        output_fn.write_text(json.dumps(data))
+        # tPath = self.objPath / "inputs"
+        # output_fn = tPath / "object_data.json"
+        # output_json_dict = {
+        # "label": self.objName,
+        # "bbox_modal": [168,173,260,252] # 0.2 0 0 -> [288,170,392,253]
+        # }
+        # data = []
+        # data.append(output_json_dict)
+        # output_fn.write_text(json.dumps(data))
        # ***
        #{"K": [[25.0, 0.0, 8.65], [0.0, 25.0, 6.5], [0.0, 0.0, 1.0]], "resolution": [480, 640]}
@@ -413,7 +400,7 @@
        output_fn = path_to / "inputs/object_data.json"
        output_json_dict = {
            "label": self.objName,
-            "bbox_modal": bbox #[288,170,392,253]
+            "bbox_modal": bbox
        }
        data = []
        data.append(output_json_dict)
@@ -441,6 +428,9 @@
        self.camera_pose.orientation.x = t.transform.rotation.x
        self.camera_pose.orientation.y = t.transform.rotation.y
        self.camera_pose.orientation.z = t.transform.rotation.z
+        # self._pose[0] = [t.transform.rotation.w, t.transform.rotation.x, t.transform.rotation.y, t.transform.rotation.z]
+        # self._pose[1] = [t.transform.translation.x, t.transform.translation.y, t.transform.translation.z]
        # Convert ROS Image message to OpenCV image
        current_frame = self.br.imgmsg_to_cv2(data)
@@ -492,3 +482,4 @@
    lc_node.destroy_node()
if __name__ == '__main__':
    main()
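
Net effect of the pose_estimation_lifecycle.py changes: the node no longer mirrors the estimated pose from the camera (view) frame into world coordinates itself. It publishes the raw camera-frame pose and broadcasts the transform with camera_link as the parent frame and <objName>_pp as the child, leaving camera-to-world composition to the TF tree. For reference, a sketch of the conversion the commit comments out (names as in the node; quat_rotate_xyz is its fixed axis-remapping quaternion):

import transforms3d as t3d

# The view-to-world step removed from publish(): rotate by the fixed axis
# remap, reorder the translation as xyz -> +z,-x,-y, then offset by the
# camera position (cam_p is a geometry_msgs Point).
def view_to_world(q, t, quat_rotate_xyz, cam_p):
    q_mir = t3d.quaternions.qmult(quat_rotate_xyz, q)
    t_mir = [t[2], -t[0], -t[1]]
    return q_mir, [t_mir[0] + cam_p.x,
                   t_mir[1] + cam_p.y,
                   t_mir[2] + cam_p.z]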