Move away from deprecated methods #337

Open · wants to merge 3 commits into master
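Note on the pattern: every hunk in this PR makes the same two-part change to PyTorch's dtype-dispatch macro. The deprecated at::Tensor::type() is replaced by at::Tensor::scalar_type(), and the extra parentheses that used to wrap the lambda passed to AT_DISPATCH_FLOATING_TYPES_AND_HALF are dropped (the macro is variadic, so the lambda no longer needs to be shielded as a single macro argument). A minimal sketch of the before/after pattern; example_dispatch is a hypothetical function, not part of this diff:

#include <ATen/ATen.h>
#include <ATen/Dispatch.h>

void example_dispatch(const at::Tensor& in_feat) {
  // Deprecated form (the lines marked - below):
  //   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
  //       in_feat.type(), "example_dispatch", ([&] { /* ... */ }));
  // Current form (the lines marked +):
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      in_feat.scalar_type(), "example_dispatch", [&] {
        // Inside the lambda, scalar_t is bound to float, double, or
        // at::Half depending on in_feat's runtime dtype.
        const scalar_t* data = in_feat.data_ptr<scalar_t>();
        (void)data;  // a real caller would launch a kernel here
      });
}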
24 changes: 12 additions & 12 deletions torchsparse/backend/convolution/convolution_gather_scatter_cuda.cu
@@ -547,7 +547,7 @@ at::Tensor conv_forward_gather_scatter_cuda_latest(
 
   // all gather
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+      in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", [&] {
        gather_all_kernel_pad_sep_with_mask<scalar_t>
            <<<ceil((double)(n_in_feats * n_in_channels) /
                    (256 << (sizeof(scalar_t) == 2) + 2)),
@@ -560,7 +560,7 @@ at::Tensor conv_forward_gather_scatter_cuda_latest(
               cum_buffer_sizes_gpu.data_ptr<int>(),
               input_mask.data_ptr<int>(), output_mask.data_ptr<int>(),
               transpose, precompute_mid);
-      }));
+      });
 
   at::Tensor in_buffer_activated, out_buffer_activated, kernel_buffer;
   int buffer_st;
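One context line in the first hunk above is easy to misparse: in C++ the additive operators bind tighter than the shift operators, so 256 << (sizeof(scalar_t) == 2) + 2 evaluates as 256 << ((sizeof(scalar_t) == 2) + 2), giving 256 << 3 = 2048 for half precision (where sizeof(scalar_t) == 2 is true, i.e. 1) and 256 << 2 = 1024 for float and double.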
@@ -779,14 +779,14 @@ at::Tensor conv_forward_gather_scatter_cuda_fallback(
     // gather n_active_feats dense features from N sparse input features with c
     // feature dimensions
     AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-        in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+        in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", [&] {
          gather_kernel<scalar_t>
              <<<ceil((double)(n_active_feats * n_in_channels) / 256), 256>>>(
                  n_active_feats, n_in_feats, n_in_channels,
                  in_feat.data_ptr<scalar_t>(),
                  in_buffer_activated.data_ptr<scalar_t>(),
                  neighbor_map.data_ptr<int>() + cur_offset, transpose);
-        }));
+        });
     // gemm: (i, c) X (c, o) = (i, o)
     int kmap_idx = i;
     if (conv_mode == 2) {
@@ -796,14 +796,14 @@ at::Tensor conv_forward_gather_scatter_cuda_fallback(
     // scatter n_active_feats dense features into n_out_feats output features of
     // dimension n_out_channels
     AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-        in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+        in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", [&] {
          scatter_kernel<scalar_t>
              <<<ceil((double)(n_active_feats * n_out_channels) / 256), 256>>>(
                  n_active_feats, n_out_feats, n_out_channels,
                  out_buffer_activated.data_ptr<scalar_t>(),
                  out_feat.data_ptr<scalar_t>(),
                  neighbor_map.data_ptr<int>() + cur_offset, transpose);
-        }));
+        });
     cur_offset += 2 * n_active_feats;
   }
 
@@ -877,23 +877,23 @@ void conv_backward_gather_scatter_cuda(at::Tensor in_feat, at::Tensor grad_in_fe
     }
     // gather
     AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-        in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+        in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", [&] {
          gather_kernel<scalar_t>
              <<<ceil((double)(n_active_feats * n_out_channels) / 256), 256>>>(
                  n_active_feats, n_out_feats, n_out_channels,
                  grad_out_feat.data_ptr<scalar_t>(),
                  out_grad_buffer_activated.data_ptr<scalar_t>(),
                  neighbor_map.data_ptr<int>() + cur_offset, !transpose);
-        }));
+        });
     AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-        in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+        in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", [&] {
          gather_kernel<scalar_t>
              <<<ceil((double)(n_active_feats * n_in_channels) / 256), 256>>>(
                  n_active_feats, n_in_feats, n_in_channels,
                  in_feat.data_ptr<scalar_t>(),
                  in_buffer_activated.data_ptr<scalar_t>(),
                  neighbor_map.data_ptr<int>() + cur_offset, transpose);
-        }));
+        });
     // gemm
     torch::mm_out(in_grad_buffer_activated, out_grad_buffer_activated,
                   torch::transpose(kernel[i], 0, 1));
@@ -902,14 +902,14 @@ void conv_backward_gather_scatter_cuda(at::Tensor in_feat, at::Tensor grad_in_fe
                   out_grad_buffer_activated);
     // scatter
     AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-        in_feat.type(), "conv_forward_gather_scatter_cuda", ([&] {
+        in_feat.scalar_type(), "conv_forward_gather_scatter_cuda", [&] {
          scatter_kernel<scalar_t>
              <<<ceil((double)(n_active_feats * n_in_channels) / 256), 256>>>(
                  n_active_feats, n_in_feats, n_in_channels,
                  in_grad_buffer_activated.data_ptr<scalar_t>(),
                  grad_in_feat.data_ptr<scalar_t>(),
                  neighbor_map.data_ptr<int>() + cur_offset, !transpose);
-        }));
+        });
     cur_offset += 2 * n_active_feats;
   }
 }
8 changes: 4 additions & 4 deletions torchsparse/backend/devoxelize/devoxelize_cuda.cu
@@ -68,11 +68,11 @@ at::Tensor devoxelize_forward_cuda(const at::Tensor feat,
       torch::zeros({N, c}, at::device(feat.device()).dtype(feat.dtype()));
 
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      feat.type(), "devoxelize_forward_cuda", ([&] {
+      feat.scalar_type(), "devoxelize_forward_cuda", [&] {
        devoxelize_forward_kernel<scalar_t><<<N, c>>>(
            N, c, indices.data_ptr<int>(), weight.data_ptr<scalar_t>(),
            feat.data_ptr<scalar_t>(), out.data_ptr<scalar_t>());
-      }));
+      });
 
   return out;
 }
@@ -88,11 +88,11 @@ at::Tensor devoxelize_backward_cuda(const at::Tensor top_grad,
       {n, c}, at::device(top_grad.device()).dtype(top_grad.dtype()));
 
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      top_grad.type(), "devoxelize_backward_cuda", ([&] {
+      top_grad.scalar_type(), "devoxelize_backward_cuda", [&] {
        devoxelize_backward_kernel<scalar_t><<<N, c>>>(
            N, n, c, indices.data_ptr<int>(), weight.data_ptr<scalar_t>(),
            top_grad.data_ptr<scalar_t>(), bottom_grad.data_ptr<scalar_t>());
-      }));
+      });
 
   return bottom_grad;
 }
16 changes: 8 additions & 8 deletions torchsparse/backend/voxelize/voxelize_cuda.cu
@@ -86,10 +86,10 @@ at::Tensor voxelize_forward_cuda(const at::Tensor inputs, const at::Tensor idx,
       torch::zeros({N1, c}, at::device(idx.device()).dtype(inputs.dtype()));
 
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      inputs.type(), "voxelize_forward_cuda", ([&]
+      inputs.scalar_type(), "voxelize_forward_cuda", [&]
       { voxelize_forward_kernel<scalar_t><<<N, c>>>(
             N, c, N1, inputs.data_ptr<scalar_t>(), idx.data_ptr<int>(),
-            counts.data_ptr<int>(), out.data_ptr<scalar_t>()); }));
+            counts.data_ptr<int>(), out.data_ptr<scalar_t>()); });
 
   return out;
 }
@@ -105,10 +105,10 @@ at::Tensor voxelize_backward_cuda(const at::Tensor top_grad,
       torch::zeros({N, c}, at::device(idx.device()).dtype(top_grad.dtype()));
 
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      top_grad.type(), "voxelize_backward_cuda", ([&]
+      top_grad.scalar_type(), "voxelize_backward_cuda", [&]
       { voxelize_backward_kernel<scalar_t><<<N, c>>>(
           N, c, N1, top_grad.data_ptr<scalar_t>(), idx.data_ptr<int>(),
-          counts.data_ptr<int>(), bottom_grad.data_ptr<scalar_t>()); }));
+          counts.data_ptr<int>(), bottom_grad.data_ptr<scalar_t>()); });
 
   return bottom_grad;
 }
@@ -120,10 +120,10 @@ void to_dense_forward_cuda(const at::Tensor inputs, const at::Tensor idx,
   int c = inputs.size(1);
 
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      inputs.type(), "to_dense_forward_cuda", ([&]
+      inputs.scalar_type(), "to_dense_forward_cuda", [&]
       { to_dense_forward_kernel<scalar_t><<<(N * c + 255) / 256, 256>>>(
           N, c, inputs.data_ptr<scalar_t>(), idx.data_ptr<int>(),
-          range.data_ptr<int>(), outputs.data_ptr<scalar_t>()); }));
+          range.data_ptr<int>(), outputs.data_ptr<scalar_t>()); });
 }
 
 void to_dense_backward_cuda(const at::Tensor top_grad,
@@ -134,8 +134,8 @@ void to_dense_backward_cuda(const at::Tensor top_grad,
   int c = bottom_grad.size(1);
 
   AT_DISPATCH_FLOATING_TYPES_AND_HALF(
-      top_grad.type(), "to_dense_backward_cuda", ([&]
+      top_grad.scalar_type(), "to_dense_backward_cuda", [&]
       { to_dense_backward_kernel<scalar_t><<<(N * c + 255) / 256, 256>>>(
           N, c, top_grad.data_ptr<scalar_t>(), idx.data_ptr<int>(),
-          range.data_ptr<int>(), bottom_grad.data_ptr<scalar_t>()); }));
+          range.data_ptr<int>(), bottom_grad.data_ptr<scalar_t>()); });
 }
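The gather/scatter and to_dense dispatch sites above all follow the same launch recipe: a grid of ceil(total_elements / 256) blocks of 256 threads, with the kernel template instantiated at the dispatched scalar_t. A self-contained sketch of that recipe under the non-deprecated API; copy_kernel and copy_cuda are illustrative names, not torchsparse functions:

#include <ATen/ATen.h>
#include <ATen/Dispatch.h>

// One thread per element; threads past the end do nothing.
template <typename scalar_t>
__global__ void copy_kernel(int n, const scalar_t* in, scalar_t* out) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) out[i] = in[i];
}

void copy_cuda(const at::Tensor& in, at::Tensor& out) {
  const int n = static_cast<int>(in.numel());
  AT_DISPATCH_FLOATING_TYPES_AND_HALF(
      in.scalar_type(), "copy_cuda", [&] {
        // (n + 255) / 256 is the integer ceiling, matching the
        // (N * c + 255) / 256 grid computation in to_dense_forward_cuda.
        copy_kernel<scalar_t><<<(n + 255) / 256, 256>>>(
            n, in.data_ptr<scalar_t>(), out.data_ptr<scalar_t>());
      });
}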