transform from View space to World space

shalenikol 2023-12-14 11:29:14 +03:00
parent a2c819d68e
commit 88e9cce3bd
3 changed files with 45 additions and 41 deletions

View file

@@ -1,5 +1,5 @@
{
"mesh_path":"/home/shalenikol/robossembler_ws/src/robossembler-ros2/rbs_perception/config/fork.ply",
"gtpose":[1.3,0.0,0.0,0.0,0.0,0.0],
"darknet_path":"/home/shalenikol/test_detection"
"gtpose_0":[1.3,0.0,0.0,0.0,0.0,0.0],
"darknet_path_0":"/home/shalenikol/test_detection"
}
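
The only change to this config file is renaming the gtpose and darknet_path keys to gtpose_0 and darknet_path_0. Because the node looks keys up by exact name (see the "gtpose" in y check later in this commit), the suffix effectively disables both entries without deleting them. A minimal sketch of the effect, assuming the file is parsed with the json module:

import json

# Keys renamed with a "_0" suffix are no longer found by exact-name lookups.
y = json.loads('{"gtpose_0": [1.3, 0.0, 0.0, 0.0, 0.0, 0.0]}')
print("gtpose" in y)          # False -> the ground-truth pose is now ignored
print(y.get("darknet_path"))  # None  -> the darknet path falls back to its default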

View file

@@ -13,8 +13,6 @@ import shutil
import json
import tempfile
from pathlib import Path
#import pinocchio as pin
#from pinocchio.rpy import matrixToRpy, rpyToMatrix, rotate
import transforms3d as t3d
import rclpy
@@ -23,9 +21,7 @@ from rclpy.lifecycle import Publisher
from rclpy.lifecycle import State
from rclpy.lifecycle import TransitionCallbackReturn
from rclpy.timer import Timer
#from rclpy.impl.logging_severity import LoggingSeverity
#from tf.transformations import quaternion_from_euler
from ament_index_python.packages import get_package_share_directory
from sensor_msgs.msg import Image, CameraInfo
from geometry_msgs.msg import Pose, TransformStamped
@@ -79,6 +75,7 @@ class PoseEstimator(Node):
self._pose = [[1., 0., 0., 0.], [0., 0., 0.]] # pose in format "TWO" (megapose)
self.tf2_send_pose = 0
self.mesh_scale = 1.0
self.quat_rotate_xyz = [0.5, 0.5, -0.5, -0.5]
self.megapose_model = None
self.darknet_path = ""
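
The new quat_rotate_xyz constant is a unit quaternion in [w, x, y, z] order (the convention used by transforms3d). All four components being +-0.5 makes it a 120-degree rotation about a diagonal axis, i.e. a signed permutation of the coordinate axes, which is exactly what the View-to-World remap below performs. A quick check, as a sketch assuming transforms3d and numpy are available:

import numpy as np
import transforms3d as t3d

# quat2mat converts a [w, x, y, z] quaternion into a 3x3 rotation matrix.
quat_rotate_xyz = [0.5, 0.5, -0.5, -0.5]
R = t3d.quaternions.quat2mat(quat_rotate_xyz)
print(np.round(R).astype(int))
# [[ 0  0 -1]
#  [-1  0  0]
#  [ 0  1  0]]   <- a signed axis permutation, as expected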
@@ -135,23 +132,24 @@
# publish the pose estimation result
q = self._pose[0]
t = self._pose[1]
"""
msgQ = self.camera_pose.orientation
q_rot = [msgQ.w, msgQ.x, msgQ.y, msgQ.z]
q = t3d.quaternions.qinverse(q) #q[0] = -q[0]
q_mir = t3d.quaternions.qmult(q_rot,q)
"""
q_mir = q
# transform from View space to World space
# rotate
q_mir = t3d.quaternions.qmult(self.quat_rotate_xyz, q)
# translate (xyz = +z-x-y)
t_mir = [t[2], -t[0], -t[1]]
cam_p = self.camera_pose.position
#cam_Q = self.camera_pose.orientation
# q_rot = [msgQ.w, msgQ.x, msgQ.y, msgQ.z]
# q = t3d.quaternions.qinverse(q) #q[0] = -q[0]
# q_mir = t3d.quaternions.qmult(q_rot,q)
#q_mir = q
p = Pose()
"""
p.position.x = t[0] * self.mesh_scale
p.position.y = t[1] * self.mesh_scale
p.position.z = t[2] * self.mesh_scale
"""
p.position.x = t[0]
p.position.y = t[1]
p.position.z = t[2]
p.position.x = t_mir[0] + cam_p.x
p.position.y = t_mir[1] + cam_p.y
p.position.z = t_mir[2] + cam_p.z
p.orientation.w = q_mir[0]
p.orientation.x = q_mir[1]
p.orientation.y = q_mir[2]
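
The replacement above converts megapose's camera-frame ("View") pose to the world frame in three steps: rotate the orientation quaternion by quat_rotate_xyz, remap the translation axes as (x, y, z) -> (+z, -x, -y) (the usual optical-axes-to-world-axes swap: camera +z forward becomes world +x, camera +x right becomes world -y, camera +y down becomes world -z), and add the camera position. Note that only the camera's position is applied; the commented-out q_rot lines are an earlier attempt to also use its orientation, so this version implicitly assumes a camera whose axes are aligned with the world's. A self-contained sketch of the same computation (view_to_world is a name introduced here, not from the repo):

import transforms3d as t3d

QUAT_ROTATE_XYZ = [0.5, 0.5, -0.5, -0.5]  # [w, x, y, z], as in the commit

def view_to_world(q, t, cam_xyz):
    """Map a megapose "TWO" pose (q = [w,x,y,z], t = [x,y,z], camera/View
    space) into world space for a world-axis-aligned camera at cam_xyz."""
    q_mir = t3d.quaternions.qmult(QUAT_ROTATE_XYZ, q)  # rotate orientation
    t_mir = [t[2], -t[0], -t[1]]                       # remap axes: +z, -x, -y
    return q_mir, [t_mir[i] + cam_xyz[i] for i in range(3)]

# Identity orientation, object 1.3 m straight ahead of a camera at the origin:
q_w, t_w = view_to_world([1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.3], [0.0, 0.0, 0.0])
print(t_w)  # [1.3, -0.0, -0.0] -- 1.3 m along world +x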
@@ -196,15 +194,15 @@ class PoseEstimator(Node):
"""
gtpose = None
str_param = self.get_parameter("mesh_path").get_parameter_value().string_value
if str_param[0] == "{": # json string
    y = json.loads(str_param)
    if "mesh_path" not in y:
        self.get_logger().info("JSON 'mesh_path' not set")
        return TransitionCallbackReturn.FAILURE
    mesh_path = y["mesh_path"]
    if "gtpose" in y:
        gtpose = y["gtpose"]
elif str_param[0] == "!": # filename json
# if str_param[0] == "{": # json string
#     y = json.loads(str_param)
#     if "mesh_path" not in y:
#         self.get_logger().info("JSON 'mesh_path' not set")
#         return TransitionCallbackReturn.FAILURE
#     mesh_path = y["mesh_path"]
#     if "gtpose" in y:
#         gtpose = y["gtpose"]
if str_param[0] == "!": # filename json
    json_file = str_param[1:]
    if not os.path.isfile(json_file):
        self.get_logger().info("not JSON '" + json_file + "'")
@@ -331,27 +329,27 @@ class PoseEstimator(Node):
self._res = [data.height, data.width]
k_ = data.k
# self._K = [
#     [k_[0], k_[1], k_[2]],
#     [k_[3], k_[4], k_[5]],
#     [k_[6], k_[7], k_[8]]
# ]
self._K = [
    [k_[0], k_[1], k_[2]],
    [k_[3], k_[4], k_[5]],
    [k_[6], k_[7], k_[8]]
]
"""self._K = [
    [k_[0]*2.0, k_[1], data.width / 2.0], # k_[2]], #
    [k_[3], k_[4]*2.0, data.height / 2.0], # k_[5]], #
    [k_[6], k_[7], k_[8]] #self.mesh_scale]
]"""
]
"""tPath = self.objPath / "inputs"
tPath = self.objPath / "inputs"
#{"label": "fork", "bbox_modal": [329, 189, 430, 270]}
output_fn = tPath / "object_data.json"
output_json_dict = {
    "label": self.objName,
    "bbox_modal": [288,170,392,253] #[2,2,self._res[1]-4,self._res[0]-4]
    "bbox_modal": [168,173,260,252] # 0.2 0 0 -> [288,170,392,253]
}
data = []
data.append(output_json_dict)
output_fn.write_text(json.dumps(data))"""
output_fn.write_text(json.dumps(data))
#{"K": [[25.0, 0.0, 8.65], [0.0, 25.0, 6.5], [0.0, 0.0, 1.0]], "resolution": [480, 640]}
output_fn = self.objPath / "camera_data.json"
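
The intrinsics handling is now the plain one: sensor_msgs/CameraInfo.k holds the 3x3 camera matrix flattened row-major into nine values, and the code reshapes it back; an experimental variant that doubled the focal lengths and recentered the principal point appears alongside, commented out. A sketch of the reshape against the example values in the camera_data.json comment above:

# CameraInfo.k is row-major: [fx, 0, cx, 0, fy, cy, 0, 0, 1].
k_ = [25.0, 0.0, 8.65, 0.0, 25.0, 6.5, 0.0, 0.0, 1.0]  # values from the comment
K = [list(k_[i * 3:(i + 1) * 3]) for i in range(3)]
print(K)  # [[25.0, 0.0, 8.65], [0.0, 25.0, 6.5], [0.0, 0.0, 1.0]]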
@@ -474,7 +472,8 @@ class PoseEstimator(Node):
detected = self.yolo2megapose(result_json, self.objPath)
self.get_logger().info(f"darknet: end {self._image_cnt}")
if detected and self.megapose_model:
#if detected and self.megapose_model:
if self.megapose_model:
# 6D pose estimation
self.get_logger().info(f"megapose: begin {self._image_cnt} {self.objPath}")
#run_inference(self.objPath,"megapose-1.0-RGB-multi-hypothesis")

View file

@@ -127,7 +127,12 @@
    <include>
      <uri>model://fork</uri>
      <name>fork_gt</name>
      <pose>1.5 0 0.25 0 0 1.57</pose>
      <pose>1.5 0.3 0.25 0 0 0</pose>
    </include>
    <!--
    <include>
      <uri>model://king</uri>
      <name>king1</name>
      <pose>1.5 0.2 0.25 0 0 0</pose>
    </include>
    -->
  </world>
</sdf>
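
In the world file the ground-truth fork moves from (1.5, 0, 0.25) with a 1.57 rad (about 90 deg) yaw to (1.5, 0.3, 0.25) with no rotation. An SDF <pose> is six numbers, "x y z roll pitch yaw", in meters and radians. A small helper (parse_sdf_pose is a name introduced here) for reading such a pose, e.g. to compare it with the gtpose list in the JSON config:

# SDF <pose> fields: x y z roll pitch yaw (meters / radians).
def parse_sdf_pose(text: str) -> list[float]:
    return [float(v) for v in text.split()]

print(parse_sdf_pose("1.5 0.3 0.25 0 0 0"))
# [1.5, 0.3, 0.25, 0.0, 0.0, 0.0]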