diff --git a/mmdet3d/evaluation/metrics/kitti_metric.py b/mmdet3d/evaluation/metrics/kitti_metric.py
index 0a48ad2282..493f1b07bc 100644
--- a/mmdet3d/evaluation/metrics/kitti_metric.py
+++ b/mmdet3d/evaluation/metrics/kitti_metric.py
@@ -168,7 +168,7 @@ def compute_metrics(self, results: list) -> Dict[str, float]:
         """Compute the metrics from processed results.
 
         Args:
-            results (list): The processed results of each batch.
+            results (list): The processed results of the whole dataset.
 
         Returns:
             Dict[str, float]: The computed metrics. The keys are the names of
diff --git a/mmdet3d/models/data_preprocessors/data_preprocessor.py b/mmdet3d/models/data_preprocessors/data_preprocessor.py
index db04e93802..fd5417039f 100644
--- a/mmdet3d/models/data_preprocessors/data_preprocessor.py
+++ b/mmdet3d/models/data_preprocessors/data_preprocessor.py
@@ -104,10 +104,10 @@ def forward(
         ``BaseDataPreprocessor``.
 
         Args:
-            data (List[dict] | List[List[dict]]): data from dataloader.
-                The outer list always represent the batch size, when it is
-                a list[list[dict]], the inter list indicate test time
-                augmentation.
+            data (dict | List[dict]): data from dataloader.
+                The dict contains the whole batch data, when it is
+                a list[dict], the list indicate test time augmentation.
+
             training (bool): Whether to enable training time augmentation.
                 Defaults to False.
 
@@ -293,7 +293,7 @@ def _get_pad_shape(self, data: dict) -> List[tuple]:
         else:
             raise TypeError('Output of `cast_data` should be a list of dict '
                             'or a tuple with inputs and data_samples, but got'
-                            f'{type(data)}: {data}')
+                            f'{type(data)}: {data}')
         return batch_pad_shape
 
     @torch.no_grad()
diff --git a/mmdet3d/visualization/local_visualizer.py b/mmdet3d/visualization/local_visualizer.py
index 52415c0478..228c91a4b5 100644
--- a/mmdet3d/visualization/local_visualizer.py
+++ b/mmdet3d/visualization/local_visualizer.py
@@ -606,9 +606,8 @@ def add_datasample(self,
                 win_name=name,
                 wait_time=wait_time)
 
-        mkdir_or_exist(out_file)
-
         if out_file is not None:
+            mkdir_or_exist(out_file)
             if drawn_img_3d is not None:
                 mmcv.imwrite(drawn_img_3d[..., ::-1], out_file + '.jpg')
             if drawn_img is not None:
diff --git a/requirements/optional.txt b/requirements/optional.txt
index 922f07f678..ea655409e8 100644
--- a/requirements/optional.txt
+++ b/requirements/optional.txt
@@ -1,2 +1,4 @@
+black==20.8b1  # be compatible with typing-extensions 3.7.4
 spconv
-waymo-open-dataset-tf-2-1-0==1.2.0
+typing-extensions==3.7.4  # required by tensorflow before version 2.6
+waymo-open-dataset-tf-2-4-0
diff --git a/requirements/runtime.txt b/requirements/runtime.txt
index 6d45314cb0..16ec2ffc90 100644
--- a/requirements/runtime.txt
+++ b/requirements/runtime.txt
@@ -1,5 +1,5 @@
 lyft_dataset_sdk
-networkx>=2.2,<2.3
+networkx>=2.5
 numba==0.53.0
 numpy
 nuscenes-devkit
diff --git a/tests/data/kitti/kitti_infos_train.pkl b/tests/data/kitti/kitti_infos_train.pkl
index 28cf32a048..668606d990 100644
Binary files a/tests/data/kitti/kitti_infos_train.pkl and b/tests/data/kitti/kitti_infos_train.pkl differ
diff --git a/tests/data/lyft/lyft_infos.pkl b/tests/data/lyft/lyft_infos.pkl
index 76ebad7eca..453eaf69e5 100644
Binary files a/tests/data/lyft/lyft_infos.pkl and b/tests/data/lyft/lyft_infos.pkl differ
diff --git a/tests/data/nuscenes/nus_info.pkl b/tests/data/nuscenes/nus_info.pkl
index b768edb57a..e1976f2002 100644
Binary files a/tests/data/nuscenes/nus_info.pkl and b/tests/data/nuscenes/nus_info.pkl differ
diff --git a/tests/test_models/test_dense_heads/test_fcos_mono3d_head.py b/tests/test_models/test_dense_heads/test_fcos_mono3d_head.py
index 68bdad74b0..47ad88c201 100644
--- a/tests/test_models/test_dense_heads/test_fcos_mono3d_head.py
+++ b/tests/test_models/test_dense_heads/test_fcos_mono3d_head.py
@@ -44,9 +44,9 @@ def test_fcos_mono3d_head_loss(self):
 
         fcos_mono3d_head = FCOSMono3DHead(
             num_classes=10,
-            in_channels=256,
+            in_channels=32,
             stacked_convs=2,
-            feat_channels=256,
+            feat_channels=32,
             use_direction_classifier=True,
             diff_rad_by_sin=True,
             pred_attrs=True,
@@ -55,16 +55,16 @@ def test_fcos_mono3d_head_loss(self):
             dir_limit_offset=0,
             strides=[8, 16, 32, 64, 128],
             group_reg_dims=(2, 1, 3, 1, 2),  # offset, depth, size, rot, velo
-            cls_branch=(256, ),
+            cls_branch=(32, ),
             reg_branch=(
-                (256, ),  # offset
-                (256, ),  # depth
-                (256, ),  # size
-                (256, ),  # rot
+                (32, ),  # offset
+                (32, ),  # depth
+                (32, ),  # size
+                (32, ),  # rot
                 ()  # velo
             ),
-            dir_branch=(256, ),
-            attr_branch=(256, ),
+            dir_branch=(32, ),
+            attr_branch=(32, ),
             loss_cls=dict(
                 type='mmdet.FocalLoss',
                 use_sigmoid=True,
@@ -96,11 +96,11 @@ def test_fcos_mono3d_head_loss(self):
 
         # FCOS3D head expects a multiple levels of features per image
         feats = [
-            torch.rand([1, 256, 116, 200], dtype=torch.float32),
-            torch.rand([1, 256, 58, 100], dtype=torch.float32),
-            torch.rand([1, 256, 29, 50], dtype=torch.float32),
-            torch.rand([1, 256, 15, 25], dtype=torch.float32),
-            torch.rand([1, 256, 8, 13], dtype=torch.float32)
+            torch.rand([1, 32, 116, 200], dtype=torch.float32),
+            torch.rand([1, 32, 58, 100], dtype=torch.float32),
+            torch.rand([1, 32, 29, 50], dtype=torch.float32),
+            torch.rand([1, 32, 15, 25], dtype=torch.float32),
+            torch.rand([1, 32, 8, 13], dtype=torch.float32)
         ]
 
         # Test forward
diff --git a/tests/test_models/test_dense_heads/test_freeanchors.py b/tests/test_models/test_dense_heads/test_freeanchors.py
index 9073c16f06..65bfb4acbf 100644
--- a/tests/test_models/test_dense_heads/test_freeanchors.py
+++ b/tests/test_models/test_dense_heads/test_freeanchors.py
@@ -19,6 +19,14 @@ def test_freeanchor(self):
         freeanchor_cfg = _get_detector_cfg(
             'free_anchor/pointpillars_hv_regnet-1.6gf_fpn_head-free-anchor'
             '_sbn-all_8xb4-2x_nus-3d.py')
+        # decrease channels to reduce cuda memory.
+        freeanchor_cfg.pts_voxel_encoder.feat_channels = [1, 1]
+        freeanchor_cfg.pts_middle_encoder.in_channels = 1
+        freeanchor_cfg.pts_backbone.base_channels = 1
+        freeanchor_cfg.pts_backbone.stem_channels = 1
+        freeanchor_cfg.pts_neck.out_channels = 1
+        freeanchor_cfg.pts_bbox_head.feat_channels = 1
+        freeanchor_cfg.pts_bbox_head.in_channels = 1
         model = MODELS.build(freeanchor_cfg)
         num_gt_instance = 3
         packed_inputs = _create_detector_inputs(
diff --git a/tests/test_models/test_dense_heads/test_ssn.py b/tests/test_models/test_dense_heads/test_ssn.py
index 8010f1d1b8..d496284c41 100644
--- a/tests/test_models/test_dense_heads/test_ssn.py
+++ b/tests/test_models/test_dense_heads/test_ssn.py
@@ -18,6 +18,14 @@ def test_ssn(self):
         _setup_seed(0)
         ssn_cfg = _get_detector_cfg(
             'ssn/ssn_hv_secfpn_sbn-all_16xb2-2x_nus-3d.py')
+        ssn_cfg.pts_voxel_encoder.feat_channels = [1, 1]
+        ssn_cfg.pts_middle_encoder.in_channels = 1
+        ssn_cfg.pts_backbone.in_channels = 1
+        ssn_cfg.pts_backbone.out_channels = [1, 1, 1]
+        ssn_cfg.pts_neck.in_channels = [1, 1, 1]
+        ssn_cfg.pts_neck.out_channels = [1, 1, 1]
+        ssn_cfg.pts_bbox_head.in_channels = 3
+        ssn_cfg.pts_bbox_head.feat_channels = 1
         model = MODELS.build(ssn_cfg)
         num_gt_instance = 50
         packed_inputs = _create_detector_inputs(