Added a dataset generation algorithm and training of an object detection skill based on YOLOv4
This commit is contained in:
parent
0c433c10d5
commit
74f1a1493e
6 changed files with 2740 additions and 0 deletions
141
ObjectDetection/obj2Yolov4dataset.py
Normal file
@@ -0,0 +1,141 @@
import blenderproc as bproc
"""
  obj2Yolov4dataset
  General task: object detection
  Implemented function: creation of a dataset in YoloV4 format for a given object (*.obj)
  Uses the blenderproc module

  24.01.2023 @shalenikol release 0.1
"""
import numpy as np
import argparse
import random
import os
import shutil
import json

def convert2relative(height, width, bbox):
    """
    YOLO format uses relative coordinates for annotation.
    The COCO bbox is [x_min, y_min, w, h] in pixels, so the box centre is computed here.
    """
    x, y, w, h = bbox
    return (x + w/2)/width, (y + h/2)/height, w/width, h/height

parser = argparse.ArgumentParser()
parser.add_argument('scene', nargs='?', default="resources/robossembler-asset.obj", help="Path to the object file.")
parser.add_argument('output_dir', nargs='?', default="output", help="Path to where the final files will be saved")
parser.add_argument('--imgs', default=1, type=int, help="The number of times the objects should be rendered.")
args = parser.parse_args()

if not os.path.isdir(args.output_dir):
    os.mkdir(args.output_dir)

bproc.init()

# load the object into the scene
obj = bproc.loader.load_obj(args.scene)[0]
obj.set_cp("category_id", 1)

# Randomly perturb the material of the object
mat = obj.get_materials()[0]
mat.set_principled_shader_value("Specular", random.uniform(0, 1))
mat.set_principled_shader_value("Roughness", random.uniform(0, 1))
mat.set_principled_shader_value("Base Color", np.random.uniform([0, 0, 0, 1], [1, 1, 1, 1]))
mat.set_principled_shader_value("Metallic", random.uniform(0, 1))

# Create a new light
light = bproc.types.Light()
light.set_type("POINT")
# Sample its location around the object
light.set_location(bproc.sampler.shell(
    center=obj.get_location(),
    radius_min=1,
    radius_max=5,
    elevation_min=1,
    elevation_max=89
))
# Randomly set the color and energy
light.set_color(np.random.uniform([0.5, 0.5, 0.5], [1, 1, 1]))
light.set_energy(random.uniform(100, 1000))

bproc.camera.set_resolution(640, 480)

# Sample camera poses (args.imgs poses in total)
poses = 0
tries = 0
while tries < 10000 and poses < args.imgs:
    # Sample random camera location around the object
    location = bproc.sampler.shell(
        center=obj.get_location(),
        radius_min=1,
        radius_max=4,
        elevation_min=1,
        elevation_max=89
    )
    # Compute rotation from a lookat point which is placed randomly around the object
    lookat_point = obj.get_location() + np.random.uniform([-0.5, -0.5, -0.5], [0.5, 0.5, 0.5])
    rotation_matrix = bproc.camera.rotation_from_forward_vec(lookat_point - location, inplane_rot=np.random.uniform(-0.7854, 0.7854))
    # Add homogeneous camera pose based on location and rotation
    cam2world_matrix = bproc.math.build_transformation_mat(location, rotation_matrix)

    # Only add the camera pose if the object is still visible
    if obj in bproc.camera.visible_objects(cam2world_matrix):
        bproc.camera.add_camera_pose(cam2world_matrix)
        poses += 1
    tries += 1

# Enable transparency so the background becomes transparent
bproc.renderer.set_output_format(enable_transparency=True)
# add segmentation masks (per class and per instance)
bproc.renderer.enable_segmentation_output(map_by=["category_id", "instance", "name"])

# Render RGB images
data = bproc.renderer.render()

# Write data to coco file
res_dir = os.path.join(args.output_dir, 'coco_data')
bproc.writer.write_coco_annotations(res_dir,
                                    instance_segmaps=data["instance_segmaps"],
                                    instance_attribute_maps=data["instance_attribute_maps"],
                                    color_file_format='JPEG',
                                    colors=data["colors"],
                                    append_to_existing_output=True)

# load the annotation
with open(os.path.join(res_dir, "coco_annotations.json"), "r") as fh:
    y = json.load(fh)

# list of object names
with open(os.path.join(res_dir, "obj.names"), "w") as fh:
    for cat in y["categories"]:
        fh.write(cat["name"] + "\n")

# create or clear the data folder for the dataset
res_data = os.path.join(res_dir, 'data')
if os.path.isdir(res_data):
    for f in os.listdir(res_data):
        os.remove(os.path.join(res_data, f))
else:
    os.mkdir(res_data)

# list of image file names
s = []
with open(os.path.join(res_dir, "images.txt"), "w") as fh:
    for i in y["images"]:
        filename = i["file_name"]
        shutil.copy(os.path.join(res_dir, filename), res_data)
        fh.write(filename.replace('images', 'data') + "\n")
        s.append((os.path.split(filename))[1])

# it is assumed that "images" and "annotations" appear in the same order
c = 0
for i in y["annotations"]:
    bbox = i["bbox"]
    im_h = i["height"]
    im_w = i["width"]
    rel = convert2relative(im_h, im_w, bbox)
    fn = (os.path.splitext(s[c]))[0]  # file name only
    with open(os.path.join(res_data, fn + ".txt"), "w") as fh:
        # format: <target> <x-center> <y-center> <width> <height>
        fh.write("0 " + '{:-f} {:-f} {:-f} {:-f}'.format(rel[0], rel[1], rel[2], rel[3]) + "\n")
    c += 1
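A quick worked example of the label conversion used above, as a self-contained sketch: each COCO bbox [x_min, y_min, w, h] in pixels becomes a relative box centre and size on one YOLO label line. The bbox values here are invented purely for illustration.

# Hypothetical numbers, only to illustrate the COCO -> YOLO conversion used above.
def convert2relative(height, width, bbox):
    x, y, w, h = bbox
    return (x + w/2)/width, (y + h/2)/height, w/width, h/height

im_w, im_h = 640, 480                 # resolution set in the script
bbox = [100, 120, 200, 160]           # example COCO bbox in pixels (made up)
rel = convert2relative(im_h, im_w, bbox)
print("0 " + " ".join(f"{v:.6f}" for v in rel))
# -> 0 0.312500 0.416667 0.312500 0.333333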
266
ObjectDetection/objs2Yolov4dataset.py
Normal file
@@ -0,0 +1,266 @@
import blenderproc as bproc
"""
  objs2Yolov4dataset
  General task: object detection
  Implemented function: creation of a dataset in YoloV4 format for a set of given objects (*.obj) in a given scene (*.blend)
  Uses the blenderproc module

  17.02.2023 @shalenikol release 0.1
"""
import sys
import numpy as np
import argparse
import random
import os
import shutil
import json

def convert2relative(height, width, bbox):
    """
    YOLO format uses relative coordinates for annotation.
    The COCO bbox is [x_min, y_min, w, h] in pixels, so the box centre is computed here.
    """
    x, y, w, h = bbox
    return (x + w/2)/width, (y + h/2)/height, w/width, h/height

parser = argparse.ArgumentParser()
parser.add_argument('scene', nargs='?', default="resources/sklad.blend", help="Path to the scene object.")
parser.add_argument('obj_path', nargs='?', default="resources/in_obj", help="Path to the object files.")
parser.add_argument('output_dir', nargs='?', default="output", help="Path to where the final files will be saved")
parser.add_argument('vhacd_path', nargs='?', default="blenderproc_resources/vhacd", help="The directory in which vhacd should be installed or is already installed.")
parser.add_argument('--imgs', default=2, type=int, help="The number of times the objects should be rendered.")
args = parser.parse_args()

if not os.path.isdir(args.obj_path):
    print(f"{args.obj_path} : no object directory")
    sys.exit()

if not os.path.isdir(args.output_dir):
    os.mkdir(args.output_dir)

bproc.init()

# ? load the light from the scene
#cam = bproc.loader.load_blend(args.scene, data_blocks=["cameras"])
#lights = bproc.loader.load_blend(args.scene, data_blocks=["lights"])

# load the objects
list_files = os.listdir(args.obj_path)
meshs = []
i = 0
for f in list_files:
    if (os.path.splitext(f))[1] == ".obj":
        f = os.path.join(args.obj_path, f)  # path to the object file
        if os.path.isfile(f):
            meshs += bproc.loader.load_obj(f)
            i += 1

if i == 0:
    print("Objects not found")
    sys.exit()

for i, o in enumerate(meshs):
    o.set_cp("category_id", i + 1)

# load the scene
scene = bproc.loader.load_blend(args.scene, data_blocks=["objects"])
#scene = bproc.loader.load_obj(args.scene)

# find the floor
floor = None
for o in scene:
    o.set_cp("category_id", 999)
    s = o.get_name()
    if s.find("floor") >= 0:
        floor = o
if floor is None:
    print("Floor not found in the scene")
    sys.exit()

floor.enable_rigidbody(False, collision_shape='BOX')

objs = meshs + scene

for obj in meshs:
    # Make the object actively participate in the physics simulation
    obj.enable_rigidbody(active=True, collision_shape="COMPOUND")
    # Also use convex decomposition as collision shapes
    obj.build_convex_decomposition_collision_shape(args.vhacd_path)

with open(os.path.join(args.output_dir, "res.txt"), "w") as fh:
    # fh.write(str(type(scene[0]))+"\n")
    i = 0
    for o in objs:
        i += 1
        loc = o.get_location()
        euler = o.get_rotation_euler()
        fh.write(f"{i} : {o.get_name()} {loc} {euler}\n")

# define a light and set its location and energy level
light = bproc.types.Light()
light.set_type("POINT")
light.set_location([5, -5, 5])
#light.set_energy(900)
#light.set_color([0.7, 0.7, 0.7])

light1 = bproc.types.Light(name="light1")
light1.set_type("SUN")
light1.set_location([0, 0, 0])
light1.set_rotation_euler([-0.063, 0.6177, -0.1985])
#light1.set_energy(7)
light1.set_color([1, 1, 1])
"""
# Sample its location around the object
light.set_location(bproc.sampler.shell(
    center=obj.get_location(),
    radius_min=2.5,
    radius_max=5,
    elevation_min=1,
    elevation_max=89
))
"""

# define the camera intrinsics
bproc.camera.set_intrinsics_from_blender_params(1, 640, 480, lens_unit="FOV")
bproc.renderer.enable_segmentation_output(map_by=["category_id", "instance", "name"])

res_dir = os.path.join(args.output_dir, 'coco_data')
# Rendering loop
n = 3  # number of camera samples for each camera location
# Do multiple times: position the objects using the physics simulator and render X images with random camera poses
for r in range(args.imgs):
    # Randomly set the color and energy
    light.set_color(np.random.uniform([0.5, 0.5, 0.5], [1, 1, 1]))
    light.set_energy(random.uniform(500, 1000))
    light1.set_energy(random.uniform(3, 11))

    for i, o in enumerate(objs):
        mat = o.get_materials()[0]
        mat.set_principled_shader_value("Specular", random.uniform(0, 1))
        mat.set_principled_shader_value("Roughness", random.uniform(0, 1))
        mat.set_principled_shader_value("Base Color", np.random.uniform([0, 0, 0, 1], [1, 1, 1, 1]))
        mat.set_principled_shader_value("Metallic", random.uniform(0, 1))

    # Clear all key frames from the previous run
    bproc.utility.reset_keyframes()

    # Define a function that samples 6-DoF poses
    def sample_pose(obj: bproc.types.MeshObject):
        obj.set_location(np.random.uniform([-1, -1.5, 0.2], [1, 2, 1.2]))  # [-1, -1, 0], [1, 1, 2]
        obj.set_rotation_euler(bproc.sampler.uniformSO3())

    # Sample the poses of all objects above the ground without any collisions in-between
    bproc.object.sample_poses(meshs, objects_to_check_collisions=meshs + [floor], sample_pose_func=sample_pose)

    # Run the simulation and fix the poses of the objects at the end
    bproc.object.simulate_physics_and_fix_final_poses(min_simulation_time=4, max_simulation_time=20, check_object_interval=1)

    # Find the point of interest, all camera poses should look towards it
    poi = bproc.object.compute_poi(meshs)

    coord_max = [0.1, 0.1, 0.1]
    coord_min = [0., 0., 0.]

    with open(os.path.join(args.output_dir, "res.txt"), "a") as fh:
        fh.write("*****************\n")
        fh.write(f"{r}) poi = {poi}\n")
        i = 0
        for o in meshs:
            i += 1
            loc = o.get_location()
            euler = o.get_rotation_euler()
            fh.write(f" {i} : {o.get_name()} {loc} {euler}\n")
            for j in range(3):
                if loc[j] < coord_min[j]:
                    coord_min[j] = loc[j]
                if loc[j] > coord_max[j]:
                    coord_max[j] = loc[j]

    # Sample up to X camera poses
    #an = np.random.uniform(0.78, 1.2) #1. #0.35
    for i in range(5):
        # Sample location
        location = bproc.sampler.shell(center=[0, 0, 0],
                                       radius_min=1.1,
                                       radius_max=3.3,
                                       elevation_min=5,
                                       elevation_max=89)
        # coordinate along which the camera position will be sampled
        j = random.randint(0, 2)
        # shift step along the randomly chosen coordinate
        d = (coord_max[j] - coord_min[j]) / n
        if location[j] < 0:
            d = -d
        for k in range(n):
            # Compute rotation based on a vector going from the location towards the poi
            rotation_matrix = bproc.camera.rotation_from_forward_vec(poi - location, inplane_rot=np.random.uniform(-0.7854, 0.7854))
            # Add homogeneous camera pose based on location and rotation
            cam2world_matrix = bproc.math.build_transformation_mat(location, rotation_matrix)
            bproc.camera.add_camera_pose(cam2world_matrix)
            location[j] -= d
        #world_matrix = bproc.math.build_transformation_mat([2.3, -0.4, 0.66], [1.396, 0., an])
        #bproc.camera.add_camera_pose(world_matrix)
        #an += 0.2

    # render the whole pipeline
    data = bproc.renderer.render()

    # Write data to coco file
    bproc.writer.write_coco_annotations(res_dir,
                                        instance_segmaps=data["instance_segmaps"],
                                        instance_attribute_maps=data["instance_attribute_maps"],
                                        color_file_format='JPEG',
                                        colors=data["colors"],
                                        append_to_existing_output=True)

# load the annotation
with open(os.path.join(res_dir, "coco_annotations.json"), "r") as fh:
    y = json.load(fh)

# list of object names
j = 0
obj_list = []
with open(os.path.join(res_dir, "obj.names"), "w") as fh:
    for cat in y["categories"]:
        if cat["id"] < 999:
            n = cat["name"]
            i = cat["id"]
            obj_list.append([n, i, j])
            fh.write(n + "\n")
            j += 1

# create or clear the data folder for the dataset
res_data = os.path.join(res_dir, 'data')
if os.path.isdir(res_data):
    for f in os.listdir(res_data):
        os.remove(os.path.join(res_data, f))
else:
    os.mkdir(res_data)

# list of image file names
img_list = []
with open(os.path.join(res_dir, "images.txt"), "w") as fh:
    for i in y["images"]:
        filename = i["file_name"]
        shutil.copy(os.path.join(res_dir, filename), res_data)
        fh.write(filename.replace('images', 'data') + "\n")
        img_list.append([i["id"], (os.path.split(filename))[1]])

# fill in the bbox label files
for i in y["annotations"]:
    cat_id = i["category_id"]
    if cat_id < 999:
        im_id = i["image_id"]
        bbox = i["bbox"]
        im_h = i["height"]
        im_w = i["width"]
        rel = convert2relative(im_h, im_w, bbox)

        # find the list index of the matching image
        j = next(k for k, (x, _) in enumerate(img_list) if x == im_id)
        filename = img_list[j][1]
        fn = (os.path.splitext(filename))[0]  # file name only
        with open(os.path.join(res_data, fn + ".txt"), "a") as fh:
            # find the list index of the matching object
            j = next(k for k, (_, x, _) in enumerate(obj_list) if x == cat_id)
            # format: <target> <x-center> <y-center> <width> <height>
            fh.write(f"{obj_list[j][2]} {rel[0]} {rel[1]} {rel[2]} {rel[3]}\n")
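After a run the dataset lives in output/coco_data (obj.names, images.txt and the data folder with images plus one label file per image). A minimal sanity-check sketch, assuming the default output paths used above:

# Checks that every image copied into coco_data/data has a label file and that
# every class index fits the number of lines in obj.names.
import os

res_dir = os.path.join("output", "coco_data")   # default output_dir + coco_data
res_data = os.path.join(res_dir, "data")

with open(os.path.join(res_dir, "obj.names")) as fh:
    num_classes = sum(1 for line in fh if line.strip())

for f in os.listdir(res_data):
    name, ext = os.path.splitext(f)
    if ext.lower() in (".jpg", ".jpeg", ".png"):
        label = os.path.join(res_data, name + ".txt")
        if not os.path.isfile(label):
            print(f"missing label for {f}")
            continue
        with open(label) as lh:
            for line in lh:
                cls = int(line.split()[0])
                if not 0 <= cls < num_classes:
                    print(f"{label}: class index {cls} out of range")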
1160
ObjectDetection/yolov4_min.cfg
Normal file
File diff suppressed because it is too large
7
ObjectDetection/yolov4_min.data
Normal file
@@ -0,0 +1,7 @@
classes= 1
train = i_train.txt
valid = i_val.txt
names = obj.names
backup = backup
eval=coco
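The i_train.txt and i_val.txt lists referenced here are not written by the generation scripts; a minimal sketch of one way to derive them from the generated images.txt (a 90/10 split, with the dataset root assumed to be output/coco_data from the scripts above):

# Splits images.txt into i_train.txt / i_val.txt as expected by the .data file.
import os
import random

res_dir = os.path.join("output", "coco_data")   # assumed dataset root
with open(os.path.join(res_dir, "images.txt")) as fh:
    images = [line.strip() for line in fh if line.strip()]

random.shuffle(images)
n_val = max(1, len(images) // 10)               # roughly 10% for validation

with open(os.path.join(res_dir, "i_val.txt"), "w") as fh:
    fh.write("\n".join(images[:n_val]) + "\n")
with open(os.path.join(res_dir, "i_train.txt"), "w") as fh:
    fh.write("\n".join(images[n_val:]) + "\n")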
1159
ObjectDetection/yolov4_objs2.cfg
Normal file
File diff suppressed because it is too large
7
ObjectDetection/yolov4_objs2.data
Normal file
@@ -0,0 +1,7 @@
classes= 6
train = i_train.txt
valid = i_val.txt
names = obj.names
backup = backup
eval=coco
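The classes value (1 for yolov4_min, 6 here) has to agree with the suppressed .cfg files: under the usual darknet convention each [yolo] layer carries the same classes value, and the [convolutional] layer right before it needs filters = (classes + 5) * 3 with three anchor masks per layer. A small sketch of that arithmetic:

# filters for the conv layer preceding each [yolo] layer in a darknet YOLOv4 cfg:
# (classes + 4 box coordinates + 1 objectness score) * 3 anchor masks
def yolo_filters(classes: int, masks_per_layer: int = 3) -> int:
    return (classes + 5) * masks_per_layer

print(yolo_filters(1))   # 18 -> expected in yolov4_min.cfg
print(yolo_filters(6))   # 33 -> expected in yolov4_objs2.cfg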