diff --git a/examples/exam_check_data.py b/examples/exam_check_data.py
index 9fa0916..3594c62 100644
--- a/examples/exam_check_data.py
+++ b/examples/exam_check_data.py
@@ -9,7 +9,7 @@ if __name__ == '__main__':
 
     ####################################################################
-    graspnet_root = '/home/gmh/graspnet' ### ROOT PATH FOR GRASPNET ###
+    graspnet_root = '/home/alvin/data/graspnet' ### ROOT PATH FOR GRASPNET ###
     ####################################################################
 
     g = GraspNet(graspnet_root, 'kinect', 'all')
diff --git a/examples/exam_convert.py b/examples/exam_convert.py
index 2251077..8a8e242 100644
--- a/examples/exam_convert.py
+++ b/examples/exam_convert.py
@@ -13,7 +13,7 @@
 annId = 3
 
 ####################################################################
-graspnet_root = '/home/gmh/graspnet' # ROOT PATH FOR GRASPNET
+graspnet_root = '/home/alvin/data/graspnet' # ROOT PATH FOR GRASPNET
 ####################################################################
 
 g = GraspNet(graspnet_root, camera = camera, split = 'all')
diff --git a/examples/exam_convert_data_to_cloud.py b/examples/exam_convert_data_to_cloud.py
new file mode 100644
index 0000000..a401330
--- /dev/null
+++ b/examples/exam_convert_data_to_cloud.py
@@ -0,0 +1,43 @@
+from graspnetAPI import GraspNet
+import cv2
+import open3d as o3d
+import os
+from tqdm import tqdm
+
+# load every scene in the chosen range and convert its depth and color images to RGB point clouds
+
+camera = 'realsense'
+# sceneId = 160
+# annId = 3
+
+####################################################################
+graspnet_root = '/home/alvin/data/graspnet' # ROOT PATH FOR GRASPNET
+####################################################################
+
+# save the clouds in PCD format
+cloud_dir = os.path.join(graspnet_root, 'cloud', camera)
+if not os.path.exists(cloud_dir):
+    os.makedirs(cloud_dir)
+else:
+    # remove old cloud files
+    cloud_files = os.listdir(cloud_dir)
+    for cloud_file in cloud_files:
+        os.remove(os.path.join(cloud_dir, cloud_file))
+
+g = GraspNet(graspnet_root, camera = camera, split = 'train')
+
+# convert every 8th annotation of each scene (the BGR preview below is commented out)
+for sceneId in tqdm(range(160, 190)):
+    for annId in range(0, 256, 8):
+        # bgr = g.loadBGR(sceneId = sceneId, annId = annId, camera = camera)
+        # cv2.imshow('bgr', bgr)
+        # cv2.waitKey(2000)
+        # cv2.destroyAllWindows()
+
+        cloud = g.loadScenePointCloud(sceneId = sceneId, annId = annId, camera = camera, use_workspace = True)
+
+        # visualize the cloud using open3d
+        # o3d.visualization.draw_geometries([cloud])
+
+        cloud_file = os.path.join(cloud_dir, 'scene{}_{}.pcd'.format(sceneId, annId))
+        o3d.io.write_point_cloud(cloud_file, cloud)
\ No newline at end of file
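As a sanity check once the new script has run, a saved cloud can be read back with open3d. This is a minimal sketch, not part of the patch; the file name is hypothetical but follows the scene{}_{}.pcd pattern above:

    import os
    import open3d as o3d

    graspnet_root = '/home/alvin/data/graspnet'  # same root as in the script
    cloud_file = os.path.join(graspnet_root, 'cloud', 'realsense', 'scene160_0.pcd')
    cloud = o3d.io.read_point_cloud(cloud_file)
    print(cloud)  # reports how many points were recovered
    o3d.visualization.draw_geometries([cloud])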
diff --git a/examples/exam_eval.py b/examples/exam_eval.py
index b1643a1..2e30b31 100644
--- a/examples/exam_eval.py
+++ b/examples/exam_eval.py
@@ -7,7 +7,7 @@
 from graspnetAPI import GraspNetEval
 
 ####################################################################
-graspnet_root = '/home/gmh/graspnet' # ROOT PATH FOR GRASPNET
+graspnet_root = '/home/alvin/data/graspnet' # ROOT PATH FOR GRASPNET
 dump_folder = '/home/gmh/git/rgbd_graspnet/dump_affordance_iounan/' # ROOT PATH FOR DUMP
 ####################################################################
diff --git a/examples/exam_loadGrasp.py b/examples/exam_loadGrasp.py
index 3dbeadb..69249c4 100644
--- a/examples/exam_loadGrasp.py
+++ b/examples/exam_loadGrasp.py
@@ -9,11 +9,11 @@
 # change the graspnet_root path
 ####################################################################
-graspnet_root = '/home/gmh/graspnet' # ROOT PATH FOR GRASPNET
+graspnet_root = '/home/alvin/data/graspnet' # ROOT PATH FOR GRASPNET
 ####################################################################
 
-sceneId = 1
-annId = 3
+sceneId = 1 # scene index
+annId = 30 # one annotated viewpoint within the scene
 
 # initialize a GraspNet instance
 g = GraspNet(graspnet_root, camera='kinect', split='train')
diff --git a/examples/exam_nms.py b/examples/exam_nms.py
index 31eb09a..e2601ac 100644
--- a/examples/exam_nms.py
+++ b/examples/exam_nms.py
@@ -5,7 +5,7 @@
 # change the graspnet_root path
 ####################################################################
-graspnet_root = '/home/gmh/graspnet' # ROOT PATH FOR GRASPNET
+graspnet_root = '/home/alvin/data/graspnet' # ROOT PATH FOR GRASPNET
 ####################################################################
 
 sceneId = 1
diff --git a/gen_pickle_dexmodel.py b/gen_pickle_dexmodel.py
index f9c8c66..20d5ecd 100644
--- a/gen_pickle_dexmodel.py
+++ b/gen_pickle_dexmodel.py
@@ -6,10 +6,11 @@
 import os
 
 ##### Change the root to your path #####
-graspnet_root = '/home/gmh/graspnet'
+graspnet_root = '/home/alvin/data/graspnet'
 
 ##### Do NOT change this folder name #####
 dex_folder = 'dex_models'
+dex_folder = os.path.join(graspnet_root, dex_folder)
 if not os.path.exists(dex_folder):
     os.makedirs(dex_folder)
diff --git a/graspnetAPI/graspnet.py b/graspnetAPI/graspnet.py
index a9cebc0..e3efd72 100755
--- a/graspnetAPI/graspnet.py
+++ b/graspnetAPI/graspnet.py
@@ -121,7 +121,7 @@ def __init__(self, root, camera='kinect', split='train', sceneIds=[]):
             self.metaPath.append(os.path.join(
                 root, 'scenes', 'scene_'+str(i).zfill(4), camera, 'meta', str(img_num).zfill(4)+'.mat'))
             self.rectLabelPath.append(os.path.join(
-                root, 'scenes', 'scene_'+str(i).zfill(4), camera, 'rect', str(img_num).zfill(4)+'.npy'))
+                root, 'rect_labels', 'scene_'+str(i).zfill(4), camera, str(img_num).zfill(4)+'.npy'))
             self.sceneName.append('scene_'+str(i).zfill(4))
             self.annId.append(img_num)
 
@@ -663,7 +663,7 @@ def loadGrasp(self, sceneId, annId=0, format = '6d', camera='kinect', grasp_labe
             return grasp_group
         else:
             # 'rect'
-            rect_grasps = RectGraspGroup(os.path.join(self.root,'scenes','scene_%04d' % sceneId,camera,'rect','%04d.npy' % annId))
+            rect_grasps = RectGraspGroup(os.path.join(self.root,'rect_labels','scene_%04d' % sceneId,camera,'%04d.npy' % annId))
             return rect_grasps
 
     def loadData(self, ids=None, *extargs):
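Both hunks above move the rectangle labels from scenes/scene_XXXX/<camera>/rect/ to a rect_labels/ folder at the dataset root. A minimal sketch of loading one relocated file directly, assuming scene 0, annotation 0, kinect; the seven columns follow the RectGraspGroup convention (center point, open point, height, score, object id):

    import os
    import numpy as np

    graspnet_root = '/home/alvin/data/graspnet'
    rect_label_path = os.path.join(graspnet_root, 'rect_labels', 'scene_0000', 'kinect', '0000.npy')
    rect_labels = np.load(rect_label_path)
    print(rect_labels.shape)  # (N, 7) rectangle grasps for this viewpoint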
diff --git a/graspnetAPI/graspnet_eval.py b/graspnetAPI/graspnet_eval.py
index a8880c9..399414d 100644
--- a/graspnetAPI/graspnet_eval.py
+++ b/graspnetAPI/graspnet_eval.py
@@ -80,7 +80,6 @@ def get_model_poses(self, scene_id, ann_id):
         camera_pose = camera_poses[ann_id]
         align_mat_path = os.path.join(self.root, 'scenes', get_scene_name(scene_id), self.camera, 'cam0_wrt_table.npy')
         align_mat = np.load(align_mat_path)
-        # print('Scene {}, {}'.format(scene_id, camera))
         scene_reader = xmlReader(os.path.join(scene_dir, get_scene_name(scene_id), self.camera, 'annotations', '%04d.xml'% (ann_id,)))
         posevectors = scene_reader.getposevectorlist()
         obj_list = []
@@ -91,7 +90,7 @@
             pose_list.append(mat)
         return obj_list, pose_list, camera_pose, align_mat
 
-    def eval_scene(self, scene_id, dump_folder, TOP_K = 50, return_list = False,vis = False, max_width = 0.1):
+    def eval_scene(self, scene_id, dump_folder, TOP_K = 50, skip_ann = 0, vis = False, return_list = False, max_width = 0.1):
         '''
         **Input:**
 
@@ -111,6 +110,7 @@
         - scene_accuracy: np.array of shape (256, 50, 6) of the accuracy tensor.
         '''
+
         config = get_config()
         table = create_table_points(1.0, 1.0, 0.05, dx=-0.5, dy=-0.5, dz=-0.05, grid_size=0.008)
 
         model_list, dexmodel_list, _ = self.get_scene_models(scene_id, ann_id=0)
+
         model_sampled_list = list()
         for model in model_list:
             model_sampled = voxel_sample_points(model, 0.008)
@@ -128,7 +129,12 @@
         score_list_list = []
         collision_list_list = []
 
-        for ann_id in range(256):
+        if skip_ann > 0:
+            ann_range = range(0, 256, skip_ann)
+        else:
+            ann_range = range(256)
+
+        for ann_id in ann_range:
             grasp_group = GraspGroup().from_npy(os.path.join(dump_folder,get_scene_name(scene_id), self.camera, '%04d.npy' % (ann_id,)))
             _, pose_list, camera_pose, align_mat = self.get_model_poses(scene_id, ann_id)
             table_trans = transform_points(table, np.linalg.inv(np.matmul(align_mat, camera_pose)))
@@ -154,11 +160,28 @@
             grasp_list_list.append([])
             score_list_list.append([])
             collision_list_list.append([])
-            print('\rMean Accuracy for scene:{} ann:{}='.format(scene_id, ann_id),np.mean(grasp_accuracy[:,:]), end='')
+            # print('\rMean Accuracy for scene:{} ann:{}='.format(scene_id, ann_id),np.mean(grasp_accuracy[:,:]), end='')
+            print('No grasp detected for scene:{} ann:{}'.format(scene_id, ann_id))
             continue
 
         # concat into scene level
         grasp_list, score_list, collision_mask_list = np.concatenate(grasp_list), np.concatenate(score_list), np.concatenate(collision_mask_list)
+
+        # slice grasp_list to TOP_K with highest score
+        # sort in scene level
+        grasp_confidence = grasp_list[:,0]
+        indices = np.argsort(-grasp_confidence)
+        grasp_list, score_list, collision_mask_list = grasp_list[indices], score_list[indices], collision_mask_list[indices]
+
+        grasp_list_list.append(grasp_list)
+        score_list_list.append(score_list)
+        collision_list_list.append(collision_mask_list)
+
+        # slice top k from grasp_list
+        if len(grasp_list) > TOP_K:
+            grasp_list = grasp_list[:TOP_K]
+            score_list = score_list[:TOP_K]
+            collision_mask_list = collision_mask_list[:TOP_K]
 
         if vis:
             t = o3d.geometry.PointCloud()
@@ -178,14 +201,14 @@
             o3d.visualization.draw_geometries([pcd, *grasps_geometry, *model_list])
             o3d.visualization.draw_geometries([*grasps_geometry, *model_list, t])
 
-        # sort in scene level
-        grasp_confidence = grasp_list[:,0]
-        indices = np.argsort(-grasp_confidence)
-        grasp_list, score_list, collision_mask_list = grasp_list[indices], score_list[indices], collision_mask_list[indices]
+        # # sort in scene level
+        # grasp_confidence = grasp_list[:,0]
+        # indices = np.argsort(-grasp_confidence)
+        # grasp_list, score_list, collision_mask_list = grasp_list[indices], score_list[indices], collision_mask_list[indices]
 
-        grasp_list_list.append(grasp_list)
-        score_list_list.append(score_list)
-        collision_list_list.append(collision_mask_list)
+        # grasp_list_list.append(grasp_list)
+        # score_list_list.append(score_list)
+        # collision_list_list.append(collision_mask_list)
 
         #calculate AP
         grasp_accuracy = np.zeros((TOP_K,len(list_coe_of_friction)))
@@ -203,7 +226,7 @@
         else:
             return scene_accuracy, grasp_list_list, score_list_list, collision_list_list
 
-    def parallel_eval_scenes(self, scene_ids, dump_folder, proc = 2):
+    def parallel_eval_scenes(self, scene_ids, dump_folder, proc = 2, skip_ann = 0, top_k = 50, vis = False):
         '''
         **Input:**
 
@@ -217,11 +240,17 @@ def parallel_eval_scenes(self, scene_ids, dump_folder, proc = 2):
 
         - scene_acc_list: list of the scene accuracy.
         '''
+        if proc == 1:
+            res_list = []
+            for scene_id in scene_ids:
+                res_list.append(self.eval_scene(scene_id, dump_folder, TOP_K = top_k, skip_ann = skip_ann, vis = vis))
+            return res_list
+
         from multiprocessing import Pool
         p = Pool(processes = proc)
         res_list = []
         for scene_id in scene_ids:
-            res_list.append(p.apply_async(self.eval_scene, (scene_id, dump_folder)))
+            res_list.append(p.apply_async(self.eval_scene, (scene_id, dump_folder, top_k, skip_ann, vis)))
         p.close()
         p.join()
         scene_acc_list = []
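The proc = 1 path above bypasses multiprocessing entirely, which is handy for debugging, and skip_ann subsamples the 256 annotations per scene. A sketch of how the new keywords can be exercised, not part of the patch; the dump folder path is hypothetical and its layout follows exam_eval.py:

    from graspnetAPI import GraspNetEval

    graspnet_root = '/home/alvin/data/graspnet'
    dump_folder = '/path/to/dump'  # hypothetical; per-scene .npy predictions as in exam_eval.py
    ge = GraspNetEval(graspnet_root, camera='realsense', split='test')
    # skip_ann = 8 evaluates ann ids 0, 8, 16, ... instead of all 256
    res = ge.parallel_eval_scenes([160, 161], dump_folder, proc=1, skip_ann=8, top_k=50)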
diff --git a/graspnetAPI/utils/utils.py b/graspnetAPI/utils/utils.py
index fae9f77..534add5 100755
--- a/graspnetAPI/utils/utils.py
+++ b/graspnetAPI/utils/utils.py
@@ -104,7 +104,7 @@ def generate_scene_model(dataset_root, scene_name, anno_idx, return_poses=False,
         camera_pose = camera_poses[anno_idx]
         align_mat = np.load(os.path.join(dataset_root, 'scenes', scene_name, camera, 'cam0_wrt_table.npy'))
         camera_pose = np.matmul(align_mat,camera_pose)
-        print('Scene {}, {}'.format(scene_name, camera))
+        # print('Scene {}, {}'.format(scene_name, camera))
         scene_reader = xmlReader(os.path.join(dataset_root, 'scenes', scene_name, camera, 'annotations', '%04d.xml'%anno_idx))
         posevectors = scene_reader.getposevectorlist()
         obj_list = []