update scripts in web_p (dataset, train)

This commit is contained in:
shalenikol 2025-02-28 20:21:33 +03:00
parent 7d3b8ff0cb
commit c85784f3dc
3 changed files with 256 additions and 29 deletions

web_p/rbs_train2.py Normal file

@@ -0,0 +1,64 @@
"""
rbs_train2
Overall task: web-service pipeline
Implemented function: training of a neural network model on a given BOP dataset
python3 $PYTHON_EDUCATION --path /home/user/webservice/server/build/public/process/proc/inst_proc \
--form /home/user/webservice/server/build/public/process/proc/inst_proc/form.json
28.01.2025 @shalenikol release 0.1
17.02.2025 @shalenikol release 0.2 addon_dir
"""
import argparse
import os
import json
from train_Yolo import train_YoloV8
from train_Dope import train_Dope_i
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--path", required=True, help="Output path for weights")
    parser.add_argument("--form", required=True, help="JSON file with training parameters")
    args = parser.parse_args()

    if not os.path.isdir(args.path):
        print(f"Invalid output path '{args.path}'")
        exit(-1)
    wname = os.path.basename(args.path)
    outpath = os.path.dirname(args.path)

    if not os.path.isfile(args.form):
        print(f"Error: no such file '{args.form}'")
        exit(-2)
    with open(args.form, "r") as f:
        j_data = f.read()
    try:
        cfg = json.loads(j_data)
    except json.JSONDecodeError as e:
        print(f"JSON error: {e}")
        exit(-3)

    cfg = cfg["output"] # edited params
    dataset_params = cfg["process"]["selectProcess"]["value"]
    dataset_type = dataset_params["type"]
    if dataset_type != "BOP_DATASET":
        print(f"Error: Invalid dataset type '{dataset_type}'")
        exit(-4)
    dataset_name = dataset_params["instanceName"]
    dataset_path = dataset_params["path"]
    dataset_path = dataset_path.replace("//", "/") # !!! TODO !!! Nikita

    epoch = cfg["n_epoch"]
    pretrain = (cfg["pretrain"] == "True") # False
    ttype = cfg["typeWeight"] # "ObjectDetection"
    addon_dir = ""
    if "addon" in cfg:
        addon = cfg["addon"].strip()
        if addon and os.path.isdir(addon):
            addon_dir = addon

    if ttype == "ObjectDetection":
        train_YoloV8(dataset_path, wname, dataset_name, outpath, epoch, pretrain, addon_dir)
    else:
        train_Dope_i(dataset_path, wname, dataset_name, outpath, epoch, pretrain)
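
For reference, a minimal form.json consistent with the keys this script reads might look like the sketch below; all values are hypothetical, and "addon" is optional:

{
  "output": {
    "process": {"selectProcess": {"value": {
      "type": "BOP_DATASET",
      "instanceName": "ds213",
      "path": "/path/to/datasets/ds213"}}},
    "n_epoch": 3,
    "pretrain": "True",
    "typeWeight": "ObjectDetection",
    "addon": "/path/to/addon_dir"
  }
}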

@@ -8,6 +7,7 @@ import blenderproc as bproc
02.05.2024 @shalenikol release 0.1
02.07.2024 @shalenikol release 0.2
28.10.2024 @shalenikol release 0.3
28.02.2025 @shalenikol release 0.4 blenderproc 2.8.0 + blender 4.2.1 LTS
"""
import numpy as np
import argparse
@@ -16,17 +17,32 @@ import os
import shutil
import json
from pathlib import Path
import time

###########################
# !!! to avoid the following error in version 2.8.0:
#     free(): invalid pointer
# when calling bproc.writer.write_bop
import pyrender
from pyrender.platforms import egl
###########################

start_time = time.time() # remember the start time

import bpy
VHACD_PATH = "blenderproc_resources/vhacd"
DIR_MODELS = "models"
DIR_MESH = "assets/libs/objects/"
# DIR_MESH = "assets/libs/objects/"
FILE_LOG_SCENE = "res.txt"
FILE_RBS_INFO = "rbs_info.json"
FILE_GT_COCO = "scene_gt_coco.json"
EXT_MODELS = ".fbx"
FILE_PARAMS = "form.json"
PROCEDURAL_TEXTURE = "texture_path" # key in randomization params for texture types: (Noise Textures), (Procedural Patterns) or (Tileable Textures)
EXT_MODELS = ".fbx" # for scene objects (floor ...)
DETAIL_KEY = "daeUrl" # "fbx" # key in dict 'Detail' for mesh path of model
TEXTURE_TMPL = "*.jpg"
TEXTURE_IMAGE_TYPES = ["Base Color", "Metallic", "Normal", "Roughness", "Specular IOR Level"]
Not_Categories_Name = True # the category name is absent in the COCO annotation
@@ -57,19 +73,50 @@ def convert2relative(height, width, bbox):
    y += h/2
    return x/width, y/height, w/width, h/height

def convert_seconds(total_seconds):
    hours = int(total_seconds // 3600)
    minutes = int((total_seconds % 3600) // 60)
    seconds = int(total_seconds % 60)
    return f"{hours:02}:{minutes:02}:{seconds:02}"
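# A quick illustration with a hypothetical value: convert_seconds(3723)
# returns "01:02:03", i.e. 3723 s formatted as HH:MM:SS.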
def render() -> int:
    res_dir = rnd_par.output_dir
    log_dir = os.path.dirname(res_dir)
    # copy the file with randomization params
    file_params = os.path.join(res_dir, FILE_PARAMS)
    if os.path.isfile(file_params):
        shutil.copy2(file_params, log_dir)
    if os.path.isdir(res_dir):
        shutil.rmtree(res_dir)

    i = 0
    for obj in all_meshs:
        # Make the object actively participate in the physics simulation
        obj.enable_rigidbody(active=True, collision_shape="COMPOUND")
        # Also use convex decomposition as collision shapes
        obj.build_convex_decomposition_collision_shape(VHACD_PATH)
        # # this is for procedural textures, but it is not correct yet
        # fn = (os.path.splitext(rnd_par.models.filenames[i]))[0] + ".jpg" # texture file
        # if os.path.isfile(fn):
        #     material = bproc.material.create_material_from_texture(fn, material_name="texture_model"+str(i))
        #     # apply the texture to the material
        #     obj.replace_materials(material)
        tex = rnd_par.models.textures[i] # texture description
        if tex["is"]:
            mat = bproc.material.create("m"+str(i))
            for x in tex["t_images"]:
                key = list(x.keys())[0]
                mat.set_principled_shader_value(key, bpy.data.images.load(filepath=x[key]))
            obj.replace_materials(mat)
        i += 1
        # print(f"{i} : {obj.get_name()}")

    objs = all_meshs + rnd_par.scene.objs
    log_txt = os.path.join(os.path.dirname(rnd_par.output_dir), FILE_LOG_SCENE)
    log_txt = os.path.join(log_dir, FILE_LOG_SCENE)
    with open(log_txt, "w") as fh:
        for i,o in enumerate(objs):
            loc = o.get_location()
@@ -91,23 +138,21 @@ def render() -> int:
                              rnd_par.image_size_wh[1],
                              lens_unit="FOV")
    # Enable transparency so the background becomes transparent
    bproc.renderer.set_output_format(enable_transparency=True) # ???
    # add segmentation masks (per class and per instance)
    bproc.renderer.enable_segmentation_output(map_by=["category_id", "instance", "name"])
    # activate depth rendering
    bproc.renderer.enable_depth_output(activate_antialiasing=False)

    # res_dir = os.path.join(rnd_par.output_dir, rnd_par.ds_name)
    res_dir = rnd_par.output_dir
    if os.path.isdir(res_dir):
        shutil.rmtree(res_dir)
    # Rendering loop
    # Do multiple times: Position the shapenet objects using the physics simulator and render X images with random camera poses
    for r in range(rnd_par.n_series):
        print(f"********** Series : {r+1}")
        is_texture = True if "texture_path" in rnd_par.models_randomization else False
        is_texture = True if PROCEDURAL_TEXTURE in rnd_par.models_randomization else False
        if is_texture:
            val = rnd_par.models_randomization["texture_path"]
            val = rnd_par.models_randomization[PROCEDURAL_TEXTURE]
            l_texture = _get_list_texture(val)
            image = bpy.data.images.load(filepath=str(l_texture[r % len(l_texture)]))
        # one random object in the frame / all the specified objects
@@ -125,16 +170,32 @@ def render() -> int:
        for i,o in enumerate(rnd_par.scene.objs): # scene objects
            rnd_mat = rnd_par.scene.obj_data[i]["material_randomization"]
            # if PROCEDURAL_TEXTURE in rnd_mat: # path to textures (*.jpg)
            #     mat = bproc.material.create("m"+str(i))
            #     # for x in tex["t_images"]:
            #     #     key = list(x.keys())[0]
            #     val = rnd_mat[PROCEDURAL_TEXTURE]
            #     val = _get_list_texture(val)
            #     image = bpy.data.images.load(filepath=str(random.choice(val)))
            #     mat.set_principled_shader_value("Base Color", image)
            #     o.replace_materials(mat)
            mats = o.get_materials() #[0]
            for mat in mats:
                # with open(log_txt, "a") as fh:
                #     fh.write("************* mat\n")
                #     fh.write(f"{mat}\n")
                val = rnd_mat["specular"]
                mat.set_principled_shader_value("Specular", random.uniform(val[0], val[1]))
                mat.set_principled_shader_value("Specular IOR Level", random.uniform(val[0], val[1])) # for Blender < 4.2 this was "Specular"
                val = rnd_mat["roughness"]
                mat.set_principled_shader_value("Roughness", random.uniform(val[0], val[1]))
                val = rnd_mat["metallic"]
                mat.set_principled_shader_value("Metallic", random.uniform(val[0], val[1]))
                if "texture_path" in rnd_mat: # path to textures (*.jpg)
                    val = rnd_mat["texture_path"]
                if PROCEDURAL_TEXTURE in rnd_mat: # path to textures (*.jpg)
                    val = rnd_mat[PROCEDURAL_TEXTURE]
                    val = _get_list_texture(val)
                    image = bpy.data.images.load(filepath=str(random.choice(val)))
                    mat.set_principled_shader_value("Base Color", image)
@@ -156,7 +217,7 @@ def render() -> int:
    # Define a function that samples 6-DoF poses
    def sample_pose(obj: bproc.types.MeshObject):
        obj.set_location(np.random.uniform(rnd_par.loc_range_low, rnd_par.loc_range_high)) #[-1, -1, 0], [1, 1, 2]))
        obj.set_rotation_euler(bproc.sampler.uniformSO3())
        obj.set_rotation_euler(bproc.sampler.uniformSO3(around_x=rnd_par.around_x, around_y=rnd_par.around_y, around_z=rnd_par.around_z))

    # Sample the poses of all shapenet objects above the ground without any collisions in-between
    bproc.object.sample_poses(meshs,
@@ -232,7 +293,12 @@ def render() -> int:
rec["name"] = objn
rec["model"] = os.path.join(DIR_MODELS, os.path.split(rnd_par.models.filenames[i])[1]) # путь относительный
t = [obj.get_bound_box(local_coords=True).tolist() for obj in all_meshs if obj.get_name() == objn]
rec["cuboid"] = t[0]
if len(t) > 0:
rec["cuboid"] = t[0]
else: # object name does not match file name
rec["Error"] = "!!! object name does not match file name: cuboid is zero"
rec["cuboid"] = np.zeros((8, 3)).tolist()
data.append(rec)
shutil.copy2(rnd_par.models.filenames[i], models_dir)
f = (os.path.splitext(rnd_par.models.filenames[i]))[0] + ".mtl" # файл материала
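        # For reference, one resulting rbs_info.json record then has roughly this
        # shape (values are hypothetical; "cuboid" holds the 8 corners of the
        # object's local bounding box):
        # {"name": "obj1", "model": "models/obj1.fbx", "cuboid": [[x, y, z], ...]}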
@@ -283,9 +349,37 @@ def render() -> int:
    if Not_Categories_Name:
        explore(res_dir)

    end_time = time.time() # end time
    execution_time = end_time - start_time # elapsed time
    with open(log_txt, "a") as fh:
        fh.write("*****************\n")
        fh.write(f"Execution time: {convert_seconds(execution_time)}\n")
    return 0 # success
def _get_models(par, data) -> int:
def set_texture_model(name: str, textures: list, model_d) -> None:
    """
    textures is filled with an array of texture records of the form:
    [{"is": True, "t_images": [{"Base Color":"/path/to/shkaf_d.png"}, {"Normal":"/path/to/shkaf_n.png"}] }, ... ]
    """
    d = {"is": False}
    if "models" in model_d:
        for model in model_d["models"]:
            if model["name"] == name:
                path = model["texture_dir"].strip()
                if path:
                    t_images = []
                    for x in TEXTURE_IMAGE_TYPES:
                        if x in model:
                            rel_path = model[x].strip()
                            if rel_path:
                                t_images.append({x: os.path.join(path, rel_path)})
                    if len(t_images):
                        d["is"] = True
                        d["t_images"] = t_images
    textures.append(d)

def _get_models(par, data, models_data) -> int:
    global all_meshs

    par.models = lambda: None
@@ -294,13 +388,14 @@ def _get_models(par, data) -> int:
        return 0 # no models

    # load the objects
    par.models.names = [] # obj_names
    par.models.filenames = [] # obj_filenames
    par.models.names = []
    par.models.filenames = []
    par.models.textures = []
    i = 1
    for f in data:
        nam = f["name"]
        par.models.names.append(nam)
        ff = f["fbx"] # _get_path_model(nam)
        ff = f[DETAIL_KEY] # _get_path_model(nam)
        par.models.filenames.append(ff)
        if not os.path.isfile(ff):
            print(f"Error: no such file '{ff}'")
@@ -311,6 +406,7 @@ def _get_models(par, data) -> int:
        obj = bproc.loader.load_obj(ff)
        all_meshs += obj
        obj[0].set_cp("category_id", i) # starting from 1
        set_texture_model(nam, par.models.textures, models_data)
        i += 1
    return par.models.n_item
@@ -370,8 +466,6 @@ if __name__ == "__main__":
print(f"JSon error: {e}")
exit(-2)
# output_dir = args.path
ds_cfg = cfg["output"] # dataset config
generation = ds_cfg["generation"]
cam_pos = ds_cfg["camera_position"]
@@ -399,11 +493,14 @@ if __name__ == "__main__":
    rnd_par.models_randomization = models_randomization
    rnd_par.loc_range_low = models_randomization["loc_range_low"]
    rnd_par.loc_range_high = models_randomization["loc_range_high"]
    rnd_par.around_x = (models_randomization["around_x"] == "True")
    rnd_par.around_y = (models_randomization["around_y"] == "True")
    rnd_par.around_z = (models_randomization["around_z"] == "True")
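    # A models_randomization block consistent with the keys read here might look
    # like this sketch (all paths and values are hypothetical; "texture_path" and
    # the per-model "models" list are optional):
    # "models_randomization": {
    #     "loc_range_low": [-1, -1, 0], "loc_range_high": [1, 1, 2],
    #     "around_x": "False", "around_y": "False", "around_z": "True",
    #     "texture_path": "/path/to/textures",
    #     "models": [{"name": "obj1", "texture_dir": "/path/to/obj1_textures",
    #                 "Base Color": "obj1_d.png", "Normal": "obj1_n.png"}]
    # }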
    bproc.init()
    all_meshs = []
    if _get_models(rnd_par, rnd_par.dataset_objs) <= 0:
    if _get_models(rnd_par, rnd_par.dataset_objs, models_randomization) <= 0:
        print("Error: no models in config")
        exit(-4)
    if _get_scene(rnd_par, ds_cfg["scene"]) <= 0:

web_p/train_Yolo.py
@@ -7,6 +7,8 @@
--name test123 --datasetName ds213 --outpath /Users/idontsudo/webservice/server/build/public/7065d6b6-c8a3-48c5-9679-bb8f3a690296/weights
27.04.2024 @shalenikol release 0.1
20.11.2024 @shalenikol release 0.2 parser.add_argument("--addon", default="", help="Folder with add-on for dataset")
20.02.2025 @shalenikol release 0.2.1 add_on_dataset : fix
"""
import os
import shutil
@@ -14,9 +16,13 @@ import json
import yaml
from ultralytics import YOLO
# from ultralytics import settings
# from ultralytics.utils.metrics import DetMetrics
# import torch
# import torch.profiler
# import torch.utils.data
FILE_BASEMODEL = "yolov8n.pt"
FILE_BASEMODEL = "yolov8s.pt" #"yolov8n.pt"
FILE_RBS_INFO = "rbs_info.json"
FILE_RBS_TRAIN = "rbs_train.yaml"
FILE_GT_COCO = "scene_gt_coco.json"
@@ -27,6 +33,7 @@ DIR_ROOT_DS = "datasets"
DIR_COCO_DS = "rbs_coco"
DIR_RGB_DS = "images"
DIR_LABELS_DS = "labels"
LABELS_EXT = ".txt"
SZ_SERIES = 15 # every SZ_SERIES-th image goes to the validation list, the rest to training
@@ -40,6 +47,50 @@ def convert2relative(height, width, bbox):
    y += h/2
    return x/width, y/height, w/width, h/height
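# Illustration with hypothetical values: for a 640x480 image and a COCO bbox
# [100, 50, 200, 100] (top-left x, y plus width, height),
# convert2relative(480, 640, [100, 50, 200, 100]) yields the YOLO-style tuple
# (x_center/W, y_center/H, w/W, h/H) = (0.3125, 0.2083..., 0.3125, 0.2083...).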
def add_on_dataset(source_dir, target_dir) -> dict:
    global nn_image, f1, f2
    # Get the list of files in the source directory
    files = sorted(os.listdir(source_dir))
    # Dictionary tracking the sequence number assigned to each file name
    file_nn = {}
    for file in files:
        if os.path.isdir(os.path.join(source_dir, file)):
            continue
        # Split the file name and its extension
        file_name, file_extension = os.path.splitext(file)
        # Remember the sequence number for this file name
        if file_name in file_nn:
            nn = file_nn[file_name]
        else: # new file name
            nn = nn_image # current number
            file_nn[file_name] = nn_image
            nn_image += 1
        # Build the new file name
        new_file_name = f"{nn:06}{file_extension}"
        if file_extension == LABELS_EXT:
            new_file_path = os.path.join(target_dir, DIR_LABELS_DS)
        else:
            new_file_path = os.path.join(target_dir, DIR_RGB_DS)
            line = os.path.join("./", DIR_RGB_DS, new_file_name) + "\n"
            if nn % SZ_SERIES == 0:
                f2.write(line)
            else:
                f1.write(line)
        # Full paths to the old and new files
        old_file_path = os.path.join(source_dir, file)
        new_file_path = os.path.join(new_file_path, new_file_name)
        # Copy the file
        shutil.copy2(old_file_path, new_file_path)
    return file_nn
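# Illustration of the renaming scheme (hypothetical add-on folder): box.jpg and
# box.txt share the basename "box", so both receive the same sequential number,
# e.g. 000120.jpg copied to images/ and 000120.txt to labels/; every
# SZ_SERIES-th image is routed to the validation list instead of the training list.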
def gt_parse(path: str, out_dir: str):
    global nn_image, f1, f2
    with open(os.path.join(path, FILE_GT_COCO), "r") as fh:
@@ -67,12 +118,12 @@ def gt_parse(path: str, out_dir: str):
            # format: <target> <x-center> <y-center> <width> <height>
            fh.write(f"{cat_id-1} {rel[0]} {rel[1]} {rel[2]} {rel[3]}\n") # category from 0
        nn_image += 1
        line = os.path.join("./", DIR_RGB_DS, f + ext) + "\n"
        if nn_image % SZ_SERIES == 0:
            f2.write(line)
        else:
            f1.write(line)
        nn_image += 1
def explore(path: str, res_dir: str):
    if not os.path.isdir(path):
@@ -88,7 +139,7 @@ def explore(path: str, res_dir: str):
        else:
            explore(path_entry, res_dir)
def BOP2Yolo_dataset(dpath: str, out_dir: str, lname: list) -> str:
def BOP2Yolo_dataset(dpath: str, out_dir: str, lname: list, addon: str) -> str:
    """ Convert a BOP dataset to YOLO format for training """
    cfg_yaml = os.path.join(out_dir, FILE_RBS_TRAIN)
    p = os.path.join(out_dir, DIR_ROOT_DS, DIR_COCO_DS)
@@ -116,12 +167,14 @@ def BOP2Yolo_dataset(dpath: str, out_dir: str, lname: list) -> str:
    f1 = open(os.path.join(res_dir, FILE_L_TRAIN), "w")
    f2 = open(os.path.join(res_dir, FILE_L_VAL), "w")
    explore(dpath, res_dir)
    if addon:
        add_on_dataset(addon, res_dir)
    f1.close()
    f2.close()
    return out_dir
def train_YoloV8(path:str, wname:str, dname:str, outpath:str, epochs:int, pretrain: bool):
def train_YoloV8(path:str, wname:str, dname:str, outpath:str, epochs:int, pretrain: bool, addon: str):
    """ Main procedure for training a YOLOv8 model """
    if not os.path.isdir(outpath):
        print(f"Invalid output path '{outpath}'")
@@ -151,21 +204,33 @@ def train_YoloV8(path:str, wname:str, dname:str, outpath:str, epochs:int, pretrain: bool):
    # list of object names
    list_name = list(map(lambda x: x["name"], y))

    dpath = BOP2Yolo_dataset(ds_path, out_dir, list_name)
    dpath = BOP2Yolo_dataset(ds_path, out_dir, list_name, addon)
    if len(dpath) == 0:
        print(f"Error converting dataset '{ds_path}' to '{outpath}'")
        exit(-4)

    model_path = os.path.join(dpath, FILE_BASEMODEL)
    model = YOLO(model_path)
    results = model.train(data=os.path.join(dpath, FILE_RBS_TRAIN), epochs=epochs, project=out_dir)
    # # Update settings
    # settings.update({"profile": True})

    # prof = torch.profiler.profile(
    #     schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=1),
    #     on_trace_ready=torch.profiler.tensorboard_trace_handler('./log/resnet18'),
    #     record_shapes=True,
    #     with_stack=True)
    # prof.start()
    results = model.train(data=os.path.join(dpath, FILE_RBS_TRAIN), epochs=epochs, project=out_dir) #, log_dir="runs/train")
    # prof.stop()

    wf = os.path.join(results.save_dir, FILE_TRAIN_RES)
    if not os.path.isfile(wf):
        print(f"Error in training: no result file '{wf}'")
        exit(-5)
    shutil.copy2(wf, os.path.join(dpath, wname + ".pt"))
    shutil.rmtree(results.save_dir)
    # shutil.rmtree(results.save_dir)
if __name__ == "__main__":
    import argparse
@@ -176,6 +241,7 @@ if __name__ == "__main__":
    parser.add_argument("--outpath", default="weights", help="Output path for weights")
    parser.add_argument("--epoch", default=3, type=int, help="How many training epochs")
    parser.add_argument('--pretrain', action="store_true", help="Use pretraining")
    parser.add_argument("--addon", default="", help="Folder with add-on for dataset")
    args = parser.parse_args()

    train_YoloV8(args.path, args.name, args.datasetName, args.outpath, args.epoch, args.pretrain)
    train_YoloV8(args.path, args.name, args.datasetName, args.outpath, args.epoch, args.pretrain, args.addon)