Merge remote-tracking branch 'origin/main' into 90-real-robot-setup
commit af6b20b622
4 changed files with 19 additions and 20 deletions
@@ -3,7 +3,7 @@
 <BehaviorTree ID="PoseEstimation">
   <Sequence>
     <Action ID="PoseEstimation"
-      ObjectName="!/rbs_ws/src/robossembler-ros2/rbs_perception/config/str_param.json"
+      ObjectName="!/home/shalenikol/robossembler_ws/src/robossembler-ros2/rbs_perception/config/str_param.json"
       ReqKind = "calibrate"
       server_name="/pose_estimation/change_state"
       server_timeout="1000"/>
@@ -14,4 +14,3 @@
       server_timeout="1000"/>
   </Sequence>
 </BehaviorTree>
-</root>
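For orientation: the leading "!" in the ObjectName value above is the convention pose_estimator.py (further down in this diff) uses to treat the string as the path of a JSON parameter file rather than an inline JSON string. A minimal sketch of that convention, assuming the "mesh_path" and "gtpose" keys that appear in the inline-JSON branch this commit removes; the helper name and error handling are illustrative:

import json
import os

def load_object_param(str_param: str):
    # "!<path>" means: read the parameters from the JSON file at <path>
    if str_param.startswith("!"):  # filename json
        json_file = str_param[1:]
        if not os.path.isfile(json_file):
            return None, None  # illustrative: the node reports a failed transition here
        with open(json_file) as f:
            y = json.load(f)
        # keys used by the (removed) inline-JSON branch in this commit
        return y.get("mesh_path"), y.get("gtpose")
    return None, None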
@@ -101,6 +101,15 @@ private:
     _set_params_client->set_parameters(_parameters);
   }

+  void set_mesh_param() {
+    auto _package_name = ament_index_cpp::get_package_share_directory("rbs_perception");
+    _model_path = build_model_path(_model_name, _package_name);
+    RCLCPP_INFO_STREAM(_node->get_logger(), _model_path);
+
+    std::vector<rclcpp::Parameter> _parameters{rclcpp::Parameter("mesh_path", _model_name)};
+    _set_params_client->set_parameters(_parameters);
+  }
+
   void set_mesh_param() {
     auto _package_name =
         ament_index_cpp::get_package_share_directory("rbs_perception");
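The set_mesh_param() added above resolves the model path against the rbs_perception package share directory and pushes the model name to the perception node as its "mesh_path" string parameter through _set_params_client. A hedged sketch of the same update done from Python, assuming the node exposes the standard /pose_estimation/set_parameters service; the helper name and error handling are illustrative:

from rcl_interfaces.msg import Parameter, ParameterType, ParameterValue
from rcl_interfaces.srv import SetParameters
from rclpy.node import Node

def set_mesh_path(node: Node, mesh_path: str) -> None:
    # Same effect as the C++ _set_params_client->set_parameters(...) call above
    client = node.create_client(SetParameters, "/pose_estimation/set_parameters")
    if not client.wait_for_service(timeout_sec=1.0):
        node.get_logger().warn("pose_estimation parameter service not available")
        return
    request = SetParameters.Request()
    request.parameters = [Parameter(
        name="mesh_path",
        value=ParameterValue(type=ParameterType.PARAMETER_STRING,
                             string_value=mesh_path))]
    client.call_async(request)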
@@ -197,14 +197,6 @@ class PoseEstimator(Node):
         """
         gtpose = None
         str_param = self.get_parameter("mesh_path").get_parameter_value().string_value
-        # if str_param[0] == "{": # json string
-        # y = json.loads(str_param)
-        # if "mesh_path" not in y:
-        # self.get_logger().info("JSon 'mesh_path' not set")
-        # return TransitionCallbackReturn.FAILURE
-        # mesh_path = y["mesh_path"]
-        # if "gtpose" in y:
-        # gtpose = y["gtpose"]
         if str_param[0] == "!": # filename json
             json_file = str_param[1:]
             if not os.path.isfile(json_file):
@@ -342,9 +334,14 @@ class PoseEstimator(Node):
             [k_[3], k_[4]*2.0, data.height / 2.0], # k_[5]], #
             [k_[6], k_[7], k_[8]] #self.mesh_scale]
         ]
+        # self._K = [
+        # [k_[0]*2.0, k_[1], data.width / 2.0], # k_[2]], #
+        # [k_[3], k_[4]*2.0, data.height / 2.0], # k_[5]], #
+        # [k_[6], k_[7], k_[8]] #self.mesh_scale]
+        # ]

+        # *** for test
         tPath = self.objPath / "inputs"
-        #{"label": "fork", "bbox_modal": [329, 189, 430, 270]}
         output_fn = tPath / "object_data.json"
         output_json_dict = {
             "label": self.objName,
@@ -353,6 +350,7 @@ class PoseEstimator(Node):
         data = []
         data.append(output_json_dict)
         output_fn.write_text(json.dumps(data))
+        # ***

         #{"K": [[25.0, 0.0, 8.65], [0.0, 25.0, 6.5], [0.0, 0.0, 1.0]], "resolution": [480, 640]}
         output_fn = self.objPath / "camera_data.json"
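The two files written in this callback follow the MegaPose example-folder layout quoted in the comments: object_data.json carries a list of labelled 2D detections ({"label": ..., "bbox_modal": [...]}) and camera_data.json carries the intrinsics and resolution ({"K": [[...]], "resolution": [...]}). A minimal sketch of that layout; the helper name, arguments and directory creation are illustrative rather than taken from the node:

import json
from pathlib import Path

def write_megapose_inputs(obj_path: Path, label: str, bbox, K, resolution) -> None:
    inputs = obj_path / "inputs"
    inputs.mkdir(parents=True, exist_ok=True)
    # e.g. [{"label": "fork", "bbox_modal": [329, 189, 430, 270]}]
    (inputs / "object_data.json").write_text(
        json.dumps([{"label": label, "bbox_modal": list(bbox)}]))
    # e.g. {"K": [[25.0, 0.0, 8.65], [0.0, 25.0, 6.5], [0.0, 0.0, 1.0]], "resolution": [480, 640]}
    (obj_path / "camera_data.json").write_text(
        json.dumps({"K": K, "resolution": list(resolution)}))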
@@ -371,8 +369,6 @@ class PoseEstimator(Node):
         """
         Depth image callback function.
         """
-        #self.get_logger().info("Receiving depth image")
-
         # Convert ROS Image message to OpenCV image
         current_frame = self.br.imgmsg_to_cv2(data)

@@ -414,7 +410,6 @@ class PoseEstimator(Node):
         bbox = [2, 2, self._res[1]-4, self._res[0]-4]

         #tPath = path_to / "inputs"
-        #{"label": "fork", "bbox_modal": [329, 189, 430, 270]}
         output_fn = path_to / "inputs/object_data.json"
         output_json_dict = {
             "label": self.objName,
@@ -462,8 +457,6 @@ class PoseEstimator(Node):
         self.get_logger().info(f"darknet: begin {self._image_cnt}")
         result_json = str(self.objPath / "res.json")

-        #with open("log_darknet.txt", "w") as logf:
-        #self.get_logger().is_enabled_for(LoggingSeverity.WARN)
         subprocess.run([darknet_bin, "detector", "test",
                         "yolov4_objs2.data",
                         "yolov4_objs2.cfg",
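The detector is invoked as an external darknet process and its JSON result (res.json) is then translated for MegaPose by yolo2megapose(). A hedged sketch of that call; the weights file, the image argument and the -dont_show/-out flags are assumptions, since the hunk ends at the cfg file:

import json
import subprocess
from pathlib import Path

def run_darknet(darknet_bin: str, image_path: str, obj_path: Path):
    result_json = obj_path / "res.json"
    subprocess.run([darknet_bin, "detector", "test",
                    "yolov4_objs2.data",
                    "yolov4_objs2.cfg",
                    "yolov4_objs2.weights",   # assumed weights file
                    image_path,               # assumed input image argument
                    "-dont_show",             # assumed flags
                    "-out", str(result_json)],
                   check=False)
    # hand the raw detections back to the caller (e.g. a yolo2megapose-style converter)
    return json.loads(result_json.read_text()) if result_json.exists() else []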
@@ -475,11 +468,9 @@ class PoseEstimator(Node):
         detected = self.yolo2megapose(result_json, self.objPath)
         self.get_logger().info(f"darknet: end {self._image_cnt}")

-        #if detected and self.megapose_model:
-        if self.megapose_model:
+        if detected and self.megapose_model:
             # 6D pose estimation
             self.get_logger().info(f"megapose: begin {self._image_cnt} {self.objPath}")
-            #run_inference(self.objPath,"megapose-1.0-RGB-multi-hypothesis")
             from megapose.scripts.run_inference_on_example import run_inference_rbs
             run_inference_rbs(self.megapose_model)

@@ -501,4 +492,3 @@ def main():
     lc_node.destroy_node()
-
 if __name__ == '__main__':
     main()
@@ -80,6 +80,7 @@ def generate_launch_description():
             '/outer_rgbd_camera/points@sensor_msgs/msg/PointCloud2@gz.msgs.PointCloudPacked'
         ],
         output='screen',
+        condition=IfCondition(sim_gazebo)
     )

     rgbd_bridge_in = Node(
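The condition added above gates the point-cloud bridge on the sim_gazebo launch argument, so it is only started when the Gazebo simulation is in use. A hedged sketch of such a bridge node; the ros_gz_bridge package and parameter_bridge executable names are assumptions, since the hunk only shows the arguments, output and condition:

from launch.conditions import IfCondition
from launch.substitutions import LaunchConfiguration
from launch_ros.actions import Node

sim_gazebo = LaunchConfiguration("sim_gazebo")

rgbd_bridge = Node(
    package="ros_gz_bridge",          # assumed package name
    executable="parameter_bridge",    # assumed executable name
    arguments=[
        "/outer_rgbd_camera/points@sensor_msgs/msg/PointCloud2@gz.msgs.PointCloudPacked",
    ],
    output="screen",
    condition=IfCondition(sim_gazebo),  # only bridge the simulated camera topics
)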