Merge branch 'main' into 96-inspection-skill

Ilya Uraev 2023-12-30 23:58:20 +03:00
commit c44bc1a654
6 changed files with 85 additions and 99 deletions

View file

@@ -0,0 +1,17 @@
+<?xml version="1.0"?>
+<root main_tree_to_execute="PoseEstimation">
+  <BehaviorTree ID="PoseEstimation">
+    <Sequence>
+      <Action ID="PoseEstimation"
+              ObjectName=""
+              ReqKind="deactivate"
+              server_name="/pose_estimation/change_state"
+              server_timeout="1000"/>
+      <Action ID="PoseEstimation"
+              ObjectName=""
+              ReqKind="cleanup"
+              server_name="/pose_estimation/change_state"
+              server_timeout="1000"/>
+    </Sequence>
+  </BehaviorTree>
+</root>
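
The two actions above step the pose_estimation lifecycle node through its deactivate and cleanup transitions via its change_state service. A minimal rclpy sketch of what that amounts to, assuming the PoseEstimation BT action wraps a standard lifecycle_msgs ChangeState call (the probe node name is hypothetical):

import rclpy
from rclpy.node import Node
from lifecycle_msgs.srv import ChangeState
from lifecycle_msgs.msg import Transition

rclpy.init()
node = Node("change_state_probe")  # hypothetical helper node
client = node.create_client(ChangeState, "/pose_estimation/change_state")
client.wait_for_service(timeout_sec=1.0)  # mirrors server_timeout="1000" (ms)
for transition_id in (Transition.TRANSITION_DEACTIVATE, Transition.TRANSITION_CLEANUP):
    request = ChangeState.Request()
    request.transition.id = transition_id
    future = client.call_async(request)
    rclpy.spin_until_future_complete(node, future)
node.destroy_node()
rclpy.shutdown()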

View file

@@ -1,29 +1,22 @@
 from launch import LaunchDescription
+from launch.actions import DeclareLaunchArgument
 from launch_ros.actions import Node
 from launch.substitutions import Command, FindExecutable, LaunchConfiguration, PathJoinSubstitution
 from launch_ros.substitutions import FindPackageShare
-from ament_index_python.packages import get_package_share_directory
-import os
-import yaml
-import math
-from rbs_launch_utils.launch_common import load_yaml
+# from ament_index_python.packages import get_package_share_directory
+# import os

 def generate_launch_description():
-    # bt_config = os.path.join(
-    #     get_package_share_directory('rbs_bt_executor'),
-    #     'config',
-    #     'params.yaml'
-    # )
-    # points = load_yaml(
-    #     package_name="rbs_bt_executor",
-    #     file_path="config/points.yaml"
-    # )
-    gripperPoints = load_yaml(
-        package_name="rbs_bt_executor",
-        file_path="config/gripperPositions.yaml")
+    declared_arguments = []
+    declared_arguments.append(
+        DeclareLaunchArgument(
+            "bt_file",
+            default_value="test_tree_env_manager.xml",
+            description="BehaviorTree file for run.",
+        )
+    )
+    bt_file = LaunchConfiguration("bt_file")

     robot_description_semantic_content = Command(
         [
@@ -42,14 +35,19 @@ def generate_launch_description():
         ]
     )
     robot_description_semantic = {"robot_description_semantic": robot_description_semantic_content}
+    btfile_description = PathJoinSubstitution(
+        [FindPackageShare("rbs_bt_executor"), "bt_trees", bt_file]
+    )
+    btfile_param = {"bt_file_path": btfile_description}

-    return LaunchDescription([
+    nodes_to_start = [
         Node(
             package='behavior_tree',
             executable='bt_engine',
             # prefix=['gdbserver localhost:3000'],
             parameters=["/home/bill-finger/rbs_ws/src/robossembler-ros2/rbs_bt_executor/config/params.yaml"]
         )
-    ])
+    ]
+    return LaunchDescription(declared_arguments + nodes_to_start)
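
The tree file is no longer hard-coded: the new bt_file argument resolves through PathJoinSubstitution to bt_trees/<bt_file> inside the rbs_bt_executor share directory. A hedged sketch of overriding it when including this launch file from another one (the included file name and the tree name passed in are assumptions):

from launch import LaunchDescription
from launch.actions import IncludeLaunchDescription
from launch.launch_description_sources import PythonLaunchDescriptionSource
from launch.substitutions import PathJoinSubstitution
from launch_ros.substitutions import FindPackageShare

def generate_launch_description():
    return LaunchDescription([
        IncludeLaunchDescription(
            PythonLaunchDescriptionSource(
                PathJoinSubstitution([
                    FindPackageShare("rbs_bt_executor"),
                    "launch",
                    "rbs_bt_executor.launch.py",  # assumed file name
                ])
            ),
            # resolves to <share>/rbs_bt_executor/bt_trees/pose_estimation.xml
            launch_arguments={"bt_file": "pose_estimation.xml"}.items(),
        )
    ])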

View file

@@ -1,16 +1,10 @@
 {
   "nodeName":"pose_estimation",
-  "cameraLink":"outer_rgbd_camera",
-  "topicImage":"/outer_rgbd_camera/image",
-  "topicCameraInfo":"/outer_rgbd_camera/camera_info",
-  "topicDepth":"/outer_rgbd_camera/depth_image",
-  "publishDelay": 3.3,
+  "cameraLink":"inner_rgbd_camera",
+  "topicImage":"/inner_rgbd_camera/image",
+  "topicCameraInfo":"/inner_rgbd_camera/camera_info",
+  "topicDepth":"/inner_rgbd_camera/depth_image",
+  "publishDelay": 1.3,
   "tf2_send_pose": 1,
-  "mesh_scale": 0.001,
-  "camera_info": {
-    "fx": 924.855,
-    "fy": 923.076,
-    "width": 640,
-    "height": 480
-  }
+  "mesh_scale": 0.001
 }
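
The fixed camera_info block (fx, fy, width, height) is dropped from the config, and the topics switch from the outer to the inner RGBD camera. Intrinsics can instead be taken live from the topicCameraInfo topic named above (cf. the K-matrix code in the node diff below); a minimal probe sketch, where the helper class is hypothetical and not part of this commit:

import rclpy
from rclpy.node import Node
from sensor_msgs.msg import CameraInfo

class IntrinsicsProbe(Node):  # hypothetical helper
    def __init__(self):
        super().__init__("intrinsics_probe")
        self.create_subscription(
            CameraInfo, "/inner_rgbd_camera/camera_info", self.on_info, 10)

    def on_info(self, msg: CameraInfo):
        k = msg.k  # row-major 3x3 intrinsics: [fx 0 cx; 0 fy cy; 0 0 1]
        self.get_logger().info(
            f"fx={k[0]:.1f} fy={k[4]:.1f} cx={k[2]:.1f} cy={k[5]:.1f}")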

View file

@@ -1,5 +1,5 @@
 {
-  "mesh_path":"/home/shalenikol/robossembler_ws/src/robossembler-ros2/rbs_perception/config/fork.ply",
+  "mesh_path":"/home/shalenikol/assembly/asp-example2/sdf_generation/bishop/meshes/bishop.ply",
   "gtpose_0":[1.3,0.0,0.0,0.0,0.0,0.0],
-  "darknet_path_0":"/home/shalenikol/test_detection"
+  "darknet_path":"/home/shalenikol/test_detection"
 }
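
For reference, gtpose_0 is consumed as [x, y, z, roll, pitch, yaw]: the node keeps the translation as-is and converts the Euler part to a quaternion with transforms3d (see the on_configure hunk further down). A minimal sketch of that conversion:

import transforms3d as t3d

gtpose = [1.3, 0.0, 0.0, 0.0, 0.0, 0.0]  # values from gtpose_0 above
quat_wxyz = t3d.euler.euler2quat(gtpose[3], gtpose[4], gtpose[5])  # (w, x, y, z)
pose = [quat_wxyz, gtpose[:3]]  # matches the node's self._pose layout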

View file

@@ -1,29 +1,15 @@
-import os
 from launch_ros.actions import Node
-from ur_moveit_config.launch_common import load_yaml
 from launch import LaunchDescription

 def generate_launch_description():
     declared_arguments = []
-    points_params = load_yaml("rbs_bt_executor", "config/gripperPositions.yaml")
-    grasp_pose_loader = Node(
-        package="rbs_skill_servers",
-        executable="pick_place_pose_loader_service_server",
-        output="screen",
-        emulate_tty=True,
-        parameters=[
-            points_params
-        ]
-    )
     pose_estimation = Node(
         package="rbs_perception",
         executable="pose_estimation_lifecycle.py",
     )
     nodes_to_start = [
-        grasp_pose_loader,
         pose_estimation
     ]
     return LaunchDescription(declared_arguments + nodes_to_start)

View file

@@ -89,7 +89,7 @@ class PoseEstimator(Node):
         self.publishDelay = 2.0
         self.topicSrv = self.nodeName + "/detect6Dpose"
         self._InitService()
-        self.camera_pose = Pose() #self.get_camera_pose()
+        self.camera_pose = Pose()
         self.tmpdir = tempfile.gettempdir()
         self.mytemppath = Path(self.tmpdir) / "rbs_per"
@@ -111,21 +111,6 @@ class PoseEstimator(Node):
         self.objName = ""
         self.objMeshFile = ""
         self.objPath = ""
-    # def get_camera_pose(self) -> Pose:
-    #     # <camera_pose>3.3 2.8 2.8 0 0.5 -2.4</camera_pose>
-    #     p = Pose()
-    #     p.position.x = -2.
-    #     p.position.y = -0.55
-    #     p.position.z = 1.44
-    #     #R = rpyToMatrix([0, 0.5, -2.4])
-    #     #q = pin.Quaternion()
-    #     #q = t3d.euler.euler2quat(0., 0.5, -2.4)
-    #     p.orientation.w = 0.9524 #q[0]
-    #     p.orientation.x = -0.0476 #q[1]
-    #     p.orientation.y = 0.213 #q[2]
-    #     p.orientation.z = 0.213 #q[3]
-    #     return p

     def publish(self):
         """Publish a new message when enabled."""
@@ -137,11 +122,11 @@ class PoseEstimator(Node):
         t = self._pose[1]
         # transform from View space in World space
         # rotate
-        q_mir = t3d.quaternions.qmult(self.quat_rotate_xyz, q)
+        #q_mir = t3d.quaternions.qmult(self.quat_rotate_xyz, q)
         # translate (xyz = +z-x-y)
-        t_mir = [t[2], -t[0], -t[1]]
-        cam_p = self.camera_pose.position
+        #t_mir = [t[2], -t[0], -t[1]]
+        #cam_p = self.camera_pose.position
         #cam_Q = self.camera_pose.orientation
         # q_rot = [msgQ.w, msgQ.x, msgQ.y, msgQ.z]
         # q = t3d.quaternions.qinverse(q) #q[0] = -q[0]
@@ -150,17 +135,24 @@ class PoseEstimator(Node):
         #q_mir = q
         p = Pose()
-        p.position.x = t_mir[0] + cam_p.x
-        p.position.y = t_mir[1] + cam_p.y
-        p.position.z = t_mir[2] + cam_p.z
-        p.orientation.w = q_mir[0]
-        p.orientation.x = q_mir[1]
-        p.orientation.y = q_mir[2]
-        p.orientation.z = q_mir[3]
+        p.position.x = t[0]
+        p.position.y = t[1]
+        p.position.z = t[2]
+        p.orientation.w = q[0]
+        p.orientation.x = q[1]
+        p.orientation.y = q[2]
+        p.orientation.z = q[3]
+        # p.position.x = t_mir[0] + cam_p.x
+        # p.position.y = t_mir[1] + cam_p.y
+        # p.position.z = t_mir[2] + cam_p.z
+        # p.orientation.w = q_mir[0]
+        # p.orientation.x = q_mir[1]
+        # p.orientation.y = q_mir[2]
+        # p.orientation.z = q_mir[3]
         self._pub.publish(p)
         if self.tf2_send_pose:
-            self.tf_obj_pose(p.position,q_mir) #(self._pose)
+            self.tf_obj_pose(t,q) #(self._pose)

     def tf_obj_pose(self, tr, q):
         """
@@ -169,13 +161,13 @@ class PoseEstimator(Node):
         t = TransformStamped()
         # assign pose to corresponding tf variables
         t.header.stamp = self.get_clock().now().to_msg()
-        t.header.frame_id = 'world'
-        t.child_frame_id = self.objName
+        t.header.frame_id = self.camera_link #'world'
+        t.child_frame_id = self.objName + "_pp"
         # coordinates
         #tr = pose[1]
-        t.transform.translation.x = tr.x #[0]
-        t.transform.translation.y = tr.y #[1]
-        t.transform.translation.z = tr.z #[2]
+        t.transform.translation.x = tr[0] #.x
+        t.transform.translation.y = tr[1] #.y
+        t.transform.translation.z = tr[2] #.z
         # rotation
         #q = pose[0]
         t.transform.rotation.w = q[0]
@@ -253,7 +245,7 @@ class PoseEstimator(Node):
         else:
             self._pose = [t3d.euler.euler2quat(gtpose[3], gtpose[4], gtpose[5]), gtpose[:3]]
-        self.get_logger().info('on_configure() is called.')
+        self.get_logger().info("on_configure() is called.")
         return TransitionCallbackReturn.SUCCESS

     def on_activate(self, state: State) -> TransitionCallbackReturn:
@@ -334,22 +326,17 @@ class PoseEstimator(Node):
             [k_[3], k_[4]*2.0, data.height / 2.0], # k_[5]], #
             [k_[6], k_[7], k_[8]] #self.mesh_scale]
         ]
-        # self._K = [
-        #     [k_[0]*2.0, k_[1], data.width / 2.0], # k_[2]], #
-        #     [k_[3], k_[4]*2.0, data.height / 2.0], # k_[5]], #
-        #     [k_[6], k_[7], k_[8]] #self.mesh_scale]
-        # ]
         # *** for test
-        tPath = self.objPath / "inputs"
-        output_fn = tPath / "object_data.json"
-        output_json_dict = {
-            "label": self.objName,
-            "bbox_modal": [168,173,260,252] # 0.2 0 0 -> [288,170,392,253]
-        }
-        data = []
-        data.append(output_json_dict)
-        output_fn.write_text(json.dumps(data))
+        # tPath = self.objPath / "inputs"
+        # output_fn = tPath / "object_data.json"
+        # output_json_dict = {
+        #     "label": self.objName,
+        #     "bbox_modal": [168,173,260,252] # 0.2 0 0 -> [288,170,392,253]
+        # }
+        # data = []
+        # data.append(output_json_dict)
+        # output_fn.write_text(json.dumps(data))
         # ***
         #{"K": [[25.0, 0.0, 8.65], [0.0, 25.0, 6.5], [0.0, 0.0, 1.0]], "resolution": [480, 640]}
@@ -413,7 +400,7 @@ class PoseEstimator(Node):
         output_fn = path_to / "inputs/object_data.json"
         output_json_dict = {
             "label": self.objName,
-            "bbox_modal": bbox #[288,170,392,253]
+            "bbox_modal": bbox
         }
         data = []
         data.append(output_json_dict)
@@ -441,6 +428,9 @@ class PoseEstimator(Node):
         self.camera_pose.orientation.x = t.transform.rotation.x
         self.camera_pose.orientation.y = t.transform.rotation.y
         self.camera_pose.orientation.z = t.transform.rotation.z
+        # self._pose[0] = [t.transform.rotation.w, t.transform.rotation.x, t.transform.rotation.y, t.transform.rotation.z]
+        # self._pose[1] = [t.transform.translation.x, t.transform.translation.y, t.transform.translation.z]

         # Convert ROS Image message to OpenCV image
         current_frame = self.br.imgmsg_to_cv2(data)
@@ -492,3 +482,4 @@ def main():
     lc_node.destroy_node()

 if __name__ == '__main__':
+    main()
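
The net effect of the publish/tf_obj_pose changes above: rather than folding the camera pose into the result by hand (the removed q_mir/t_mir math), the node now publishes the raw camera-frame pose and parents the TF frame to the camera link, leaving frame composition to tf2. A hedged sketch of recovering the world-frame pose downstream (the probe node and the "bishop_pp" frame name are assumptions; the "_pp" suffix follows the change above):

import rclpy
from rclpy.node import Node
from rclpy.time import Time
from rclpy.duration import Duration
from tf2_ros import Buffer, TransformListener

rclpy.init()
node = Node("pose_lookup_probe")  # hypothetical helper node
buf = Buffer()
listener = TransformListener(buf, node, spin_thread=True)
# tf2 composes world <- camera link <- <objName>_pp automatically
t = buf.lookup_transform("world", "bishop_pp", Time(), timeout=Duration(seconds=2.0))
node.get_logger().info(f"object in world: {t.transform.translation}")
node.destroy_node()
rclpy.shutdown()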