diff --git a/src/layer/x86/convolution_1x1_pack16to1.h b/src/layer/x86/convolution_1x1_pack16to1.h new file mode 100644 index 00000000000..a5e415c3e0e --- /dev/null +++ b/src/layer/x86/convolution_1x1_pack16to1.h @@ -0,0 +1,65 @@ +// Tencent is pleased to support the open source community by making ncnn available. +// +// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. +// +// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// https://opensource.org/licenses/BSD-3-Clause +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +static void conv1x1s1_sgemm_pack16to1_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) +{ + int w = bottom_blob.w; + int h = bottom_blob.h; + const int size = w * h; + + Mat bottom_im2col = bottom_blob; + bottom_im2col.w = size; + bottom_im2col.h = 1; + + im2col_sgemm_pack16to1_avx512(bottom_im2col, top_blob, kernel, _bias, opt); +} + +static void conv1x1s2_sgemm_pack16to1_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) +{ + int w = bottom_blob.w; + int channels = bottom_blob.c; + size_t elemsize = bottom_blob.elemsize; + int elempack = bottom_blob.elempack; + + int outw = top_blob.w; + int outh = top_blob.h; + + const int tailstep = (w - 2 * outw + w) * 16; + + Mat bottom_blob_shrinked; + bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); + + #pragma omp parallel for num_threads(opt.num_threads) + for (int p = 0; p < channels; p++) + { + const float* r0 = bottom_blob.channel(p); + float* outptr = bottom_blob_shrinked.channel(p); + + for (int i = 0; i < outh; i++) + { + for (int j = 0; j < outw; j++) + { + __m512 _v = _mm512_load_ps(r0); + _mm512_store_ps(outptr, _v); + + r0 += 32; + outptr += 16; + } + + r0 += tailstep; + } + } + + conv1x1s1_sgemm_pack16to1_avx512(bottom_blob_shrinked, top_blob, kernel, _bias, opt); +} diff --git a/src/layer/x86/convolution_1x1_pack4to16.h b/src/layer/x86/convolution_1x1_pack4to16.h new file mode 100644 index 00000000000..28f81377484 --- /dev/null +++ b/src/layer/x86/convolution_1x1_pack4to16.h @@ -0,0 +1,66 @@ +// Tencent is pleased to support the open source community by making ncnn available. +// +// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. +// +// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// https://opensource.org/licenses/BSD-3-Clause +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
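Editor's note on the stride-2 path in convolution_1x1_pack16to1.h above: conv1x1s2_sgemm_pack16to1_avx512 does not implement a strided convolution directly. It copies every second packed pixel of every second row into bottom_blob_shrinked and then reuses the stride-1 sgemm. The tailstep of (w - 2 * outw + w) * 16 skips the unread remainder of the current row plus the entire next row, since both strides are 2. Below is a minimal scalar sketch of the same idea, not the ncnn implementation; the function name and raw-pointer interface are illustrative only (the real code works on ncnn::Mat channels with 16-lane AVX-512 loads).

    // Scalar sketch of the stride-2 "shrink" trick, assuming a packed layout
    // of `elempack` floats per pixel and a row width of `w` pixels.
    static void shrink_stride2(const float* src, float* dst,
                               int w, int outw, int outh, int elempack)
    {
        // After reading outw pixels at horizontal stride 2 we sit 2*outw pixels
        // into the row; skip the rest of that row plus the whole next row.
        const int tailstep = (w - 2 * outw + w) * elempack;

        const float* r0 = src;
        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                for (int k = 0; k < elempack; k++)
                    dst[k] = r0[k];  // keep one packed pixel

                r0 += 2 * elempack;  // skip the next input pixel (stride_w = 2)
                dst += elempack;
            }
            r0 += tailstep;          // jump to the row two below (stride_h = 2)
        }
    }

The stride-1 1x1 sgemm is then applied to the shrunken buffer unchanged, which is exactly what the new conv1x1s2_sgemm_pack16to1_avx512 and conv1x1s2_sgemm_pack4to16_avx512 wrappers do.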
+ +static void conv1x1s1_sgemm_pack4to16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) +{ + int w = bottom_blob.w; + int h = bottom_blob.h; + const int size = w * h; + + Mat bottom_im2col = bottom_blob; + bottom_im2col.w = size; + bottom_im2col.h = 1; + + im2col_sgemm_pack4to16_avx512(bottom_im2col, top_blob, kernel, _bias, opt); +} + +static void conv1x1s2_sgemm_pack4to16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) +{ + int w = bottom_blob.w; + int channels = bottom_blob.c; + size_t elemsize = bottom_blob.elemsize; + int elempack = bottom_blob.elempack; + + int outw = top_blob.w; + int outh = top_blob.h; + + const int tailstep = (w - 2 * outw + w) * 4; + + Mat bottom_blob_shrinked; + bottom_blob_shrinked.create(outw, outh, channels, elemsize, elempack, opt.workspace_allocator); + + #pragma omp parallel for num_threads(opt.num_threads) + for (int p = 0; p < channels; p++) + { + const float* r0 = bottom_blob.channel(p); + float* outptr = bottom_blob_shrinked.channel(p); + + for (int i = 0; i < outh; i++) + { + int j = 0; + for (; j < outw; j++) + { + __m128 _v = _mm_load_ps(r0); + _mm_store_ps(outptr, _v); + + r0 += 8; + outptr += 4; + } + + r0 += tailstep; + } + } + + conv1x1s1_sgemm_pack4to16_avx512(bottom_blob_shrinked, top_blob, kernel, _bias, opt); +} diff --git a/src/layer/x86/convolution_sgemm_pack16to1.h b/src/layer/x86/convolution_sgemm_pack16to1.h new file mode 100644 index 00000000000..e09e9544c10 --- /dev/null +++ b/src/layer/x86/convolution_sgemm_pack16to1.h @@ -0,0 +1,556 @@ +// Tencent is pleased to support the open source community by making ncnn available. +// +// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. +// +// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// https://opensource.org/licenses/BSD-3-Clause +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. 
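Editor's note on the conv1x1s1_* wrappers above: they rely on the fact that a 1x1, stride-1 convolution is a plain matrix product, so the bottom blob can be reinterpreted as an "im2col" matrix with bottom_im2col.w = size and bottom_im2col.h = 1 and handed straight to the packed sgemm. For reference, here is an unpacked scalar view of the computation the sgemm performs; this is an illustrative sketch only, since the real kernels in this patch work on 4- and 16-lane packed layouts with interleaved weights.

    // out[oc][i] = bias[oc] + sum_ic weight[oc][ic] * in[ic][i]
    // i.e. an (outch x inch) by (inch x size) matrix product.
    static void conv1x1s1_naive(const float* in, float* out,
                                const float* weight, const float* bias,
                                int inch, int outch, int size)
    {
        for (int oc = 0; oc < outch; oc++)
        {
            for (int i = 0; i < size; i++)
            {
                float sum = bias ? bias[oc] : 0.f;
                for (int ic = 0; ic < inch; ic++)
                    sum += weight[oc * inch + ic] * in[ic * size + i];

                out[oc * size + i] = sum;
            }
        }
    }

The im2col_sgemm_* routines below compute exactly this product, but with the input pixels repacked into tiles of 16 (or 8) columns and the kernel pre-interleaved by convolution_im2col_sgemm_transform_kernel_* so that the inner loop is a stream of broadcast-and-fmadd operations.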
+ +static void im2col_sgemm_pack16to1_avx512(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) +{ + // Mat bottom_im2col(size, maxk, inch, 64u, 16, opt.workspace_allocator); + + const int size = bottom_im2col.w; + const int maxk = bottom_im2col.h; + const int inch = bottom_im2col.c; + + const int outch = top_blob.c; + + const float* bias = _bias; + + Mat tmp; + if (size >= 16) + tmp.create(16 * maxk, inch, size / 16 + (size % 16) / 8 + size % 8, 64u, 16, opt.workspace_allocator); + else if (size >= 8) + tmp.create(8 * maxk, inch, size / 8 + size % 8, 64u, 16, opt.workspace_allocator); + else + tmp.create(maxk, inch, size, 64u, 16, opt.workspace_allocator); + { + int remain_size_start = 0; + int nn_size = size >> 4; + + #pragma omp parallel for num_threads(opt.num_threads) + for (int ii = 0; ii < nn_size; ii++) + { + int i = ii * 16; + + float* tmpptr = tmp.channel(i / 16); + + for (int q = 0; q < inch; q++) + { + const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16; + + for (int k = 0; k < maxk; k++) + { + // transpose 16x16 + __m512 _r0 = _mm512_loadu_ps(img0); + __m512 _r1 = _mm512_loadu_ps(img0 + 16); + __m512 _r2 = _mm512_loadu_ps(img0 + 16 * 2); + __m512 _r3 = _mm512_loadu_ps(img0 + 16 * 3); + __m512 _r4 = _mm512_loadu_ps(img0 + 16 * 4); + __m512 _r5 = _mm512_loadu_ps(img0 + 16 * 5); + __m512 _r6 = _mm512_loadu_ps(img0 + 16 * 6); + __m512 _r7 = _mm512_loadu_ps(img0 + 16 * 7); + __m512 _r8 = _mm512_loadu_ps(img0 + 16 * 8); + __m512 _r9 = _mm512_loadu_ps(img0 + 16 * 9); + __m512 _ra = _mm512_loadu_ps(img0 + 16 * 10); + __m512 _rb = _mm512_loadu_ps(img0 + 16 * 11); + __m512 _rc = _mm512_loadu_ps(img0 + 16 * 12); + __m512 _rd = _mm512_loadu_ps(img0 + 16 * 13); + __m512 _re = _mm512_loadu_ps(img0 + 16 * 14); + __m512 _rf = _mm512_loadu_ps(img0 + 16 * 15); + + transpose16_ps(_r0, _r1, _r2, _r3, _r4, _r5, _r6, _r7, _r8, _r9, _ra, _rb, _rc, _rd, _re, _rf); + + _mm512_storeu_ps(tmpptr, _r0); + _mm512_storeu_ps(tmpptr + 16, _r1); + _mm512_storeu_ps(tmpptr + 16 * 2, _r2); + _mm512_storeu_ps(tmpptr + 16 * 3, _r3); + _mm512_storeu_ps(tmpptr + 16 * 4, _r4); + _mm512_storeu_ps(tmpptr + 16 * 5, _r5); + _mm512_storeu_ps(tmpptr + 16 * 6, _r6); + _mm512_storeu_ps(tmpptr + 16 * 7, _r7); + _mm512_storeu_ps(tmpptr + 16 * 8, _r8); + _mm512_storeu_ps(tmpptr + 16 * 9, _r9); + _mm512_storeu_ps(tmpptr + 16 * 10, _ra); + _mm512_storeu_ps(tmpptr + 16 * 11, _rb); + _mm512_storeu_ps(tmpptr + 16 * 12, _rc); + _mm512_storeu_ps(tmpptr + 16 * 13, _rd); + _mm512_storeu_ps(tmpptr + 16 * 14, _re); + _mm512_storeu_ps(tmpptr + 16 * 15, _rf); + + img0 += size * 16; + tmpptr += 256; + } + } + } + + remain_size_start += nn_size << 4; + nn_size = (size - remain_size_start) >> 3; + + #pragma omp parallel for num_threads(opt.num_threads) + for (int ii = 0; ii < nn_size; ii++) + { + int i = remain_size_start + ii * 8; + + float* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); + + for (int q = 0; q < inch; q++) + { + const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16; + + for (int k = 0; k < maxk; k++) + { + // transpose 16x8 + __m512 _r0 = _mm512_loadu_ps(img0); + __m512 _r1 = _mm512_loadu_ps(img0 + 16); + __m512 _r2 = _mm512_loadu_ps(img0 + 16 * 2); + __m512 _r3 = _mm512_loadu_ps(img0 + 16 * 3); + __m512 _r4 = _mm512_loadu_ps(img0 + 16 * 4); + __m512 _r5 = _mm512_loadu_ps(img0 + 16 * 5); + __m512 _r6 = _mm512_loadu_ps(img0 + 16 * 6); + __m512 _r7 = _mm512_loadu_ps(img0 + 16 * 7); + + __m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1); + __m512 _tmp1 = 
_mm512_unpackhi_ps(_r0, _r1); + __m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3); + __m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3); + __m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5); + __m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5); + __m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7); + __m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7); + + __m512 _tmp8 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0)); + __m512 _tmp9 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2)); + __m512 _tmpa = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0)); + __m512 _tmpb = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2)); + __m512 _tmpc = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0)); + __m512 _tmpd = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2)); + __m512 _tmpe = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0)); + __m512 _tmpf = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2)); + + _tmp0 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(2, 0, 2, 0)); + _tmp1 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(2, 0, 2, 0)); + _tmp2 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(2, 0, 2, 0)); + _tmp3 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(2, 0, 2, 0)); + _tmp4 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(3, 1, 3, 1)); + _tmp5 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(3, 1, 3, 1)); + _tmp6 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(3, 1, 3, 1)); + _tmp7 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(3, 1, 3, 1)); + + _r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0)); + _r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0)); + _r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0)); + _r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0)); + _r4 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1)); + _r5 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1)); + _r6 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1)); + _r7 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1)); + + _mm512_storeu_ps(tmpptr, _r0); + _mm512_storeu_ps(tmpptr + 16, _r1); + _mm512_storeu_ps(tmpptr + 16 * 2, _r2); + _mm512_storeu_ps(tmpptr + 16 * 3, _r3); + _mm512_storeu_ps(tmpptr + 16 * 4, _r4); + _mm512_storeu_ps(tmpptr + 16 * 5, _r5); + _mm512_storeu_ps(tmpptr + 16 * 6, _r6); + _mm512_storeu_ps(tmpptr + 16 * 7, _r7); + + img0 += size * 16; + tmpptr += 128; + } + } + } + + remain_size_start += nn_size << 3; + + #pragma omp parallel for num_threads(opt.num_threads) + for (int i = remain_size_start; i < size; i++) + { + float* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + i % 8); + + for (int q = 0; q < inch; q++) + { + const float* img0 = (const float*)bottom_im2col.channel(q) + i * 16; + + for (int k = 0; k < maxk; k++) + { + __m512 _val = _mm512_load_ps(img0); + _mm512_store_ps(tmpptr, _val); + + img0 += size * 16; + tmpptr += 16; + } + } + } + } + + int nn_outch = outch / 8; + int remain_outch_start = nn_outch * 8; + + #pragma omp parallel for num_threads(opt.num_threads) + for (int pp = 0; pp < nn_outch; pp++) + { + int p = pp * 8; + + float* outptr0 = top_blob.channel(p); + float* outptr1 = top_blob.channel(p + 1); + float* outptr2 = top_blob.channel(p + 2); + float* outptr3 = top_blob.channel(p + 3); + float* outptr4 = top_blob.channel(p + 4); + float* outptr5 = top_blob.channel(p + 5); + float* outptr6 = top_blob.channel(p + 6); + float* outptr7 = top_blob.channel(p + 7); + + const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; + const float* biasptr = bias ? 
bias + p : zeros; + + int i = 0; + for (; i + 15 < size; i += 16) + { + const float* tmpptr = tmp.channel(i / 16); + const float* kptr = kernel.channel(p / 8); + + int nn = inch * maxk * 16; // inch always > 0 + + __m512 _sum0 = _mm512_set1_ps(biasptr[0]); + __m512 _sum1 = _mm512_set1_ps(biasptr[1]); + __m512 _sum2 = _mm512_set1_ps(biasptr[2]); + __m512 _sum3 = _mm512_set1_ps(biasptr[3]); + __m512 _sum4 = _mm512_set1_ps(biasptr[4]); + __m512 _sum5 = _mm512_set1_ps(biasptr[5]); + __m512 _sum6 = _mm512_set1_ps(biasptr[6]); + __m512 _sum7 = _mm512_set1_ps(biasptr[7]); + + for (int j = 0; j < nn; j++) + { + __m512 _val0 = _mm512_load_ps(tmpptr); + + __m512 _w0 = _mm512_set1_ps(kptr[0]); + __m512 _w1 = _mm512_set1_ps(kptr[1]); + _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0); + _sum1 = _mm512_fmadd_ps(_val0, _w1, _sum1); + __m512 _w2 = _mm512_set1_ps(kptr[2]); + __m512 _w3 = _mm512_set1_ps(kptr[3]); + _sum2 = _mm512_fmadd_ps(_val0, _w2, _sum2); + _sum3 = _mm512_fmadd_ps(_val0, _w3, _sum3); + __m512 _w4 = _mm512_set1_ps(kptr[4]); + __m512 _w5 = _mm512_set1_ps(kptr[5]); + _sum4 = _mm512_fmadd_ps(_val0, _w4, _sum4); + _sum5 = _mm512_fmadd_ps(_val0, _w5, _sum5); + __m512 _w6 = _mm512_set1_ps(kptr[6]); + __m512 _w7 = _mm512_set1_ps(kptr[7]); + _sum6 = _mm512_fmadd_ps(_val0, _w6, _sum6); + _sum7 = _mm512_fmadd_ps(_val0, _w7, _sum7); + + tmpptr += 16; + kptr += 8; + } + + _mm512_storeu_ps(outptr0, _sum0); + _mm512_storeu_ps(outptr1, _sum1); + _mm512_storeu_ps(outptr2, _sum2); + _mm512_storeu_ps(outptr3, _sum3); + _mm512_storeu_ps(outptr4, _sum4); + _mm512_storeu_ps(outptr5, _sum5); + _mm512_storeu_ps(outptr6, _sum6); + _mm512_storeu_ps(outptr7, _sum7); + + outptr0 += 16; + outptr1 += 16; + outptr2 += 16; + outptr3 += 16; + outptr4 += 16; + outptr5 += 16; + outptr6 += 16; + outptr7 += 16; + } + for (; i + 7 < size; i += 8) + { + const float* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); + const float* kptr = kernel.channel(p / 8); + + int nn = inch * maxk * 16; // inch always > 0 + + __m256 _sum0 = _mm256_broadcast_ss(biasptr); + __m256 _sum1 = _mm256_broadcast_ss(biasptr + 1); + __m256 _sum2 = _mm256_broadcast_ss(biasptr + 2); + __m256 _sum3 = _mm256_broadcast_ss(biasptr + 3); + __m256 _sum4 = _mm256_broadcast_ss(biasptr + 4); + __m256 _sum5 = _mm256_broadcast_ss(biasptr + 5); + __m256 _sum6 = _mm256_broadcast_ss(biasptr + 6); + __m256 _sum7 = _mm256_broadcast_ss(biasptr + 7); + + for (int j = 0; j < nn; j++) + { + __m256 _val0 = _mm256_load_ps(tmpptr); + + __m256 _w0 = _mm256_broadcast_ss(kptr); + __m256 _w1 = _mm256_broadcast_ss(kptr + 1); + _sum0 = _mm256_fmadd_ps(_val0, _w0, _sum0); + _sum1 = _mm256_fmadd_ps(_val0, _w1, _sum1); + __m256 _w2 = _mm256_broadcast_ss(kptr + 2); + __m256 _w3 = _mm256_broadcast_ss(kptr + 3); + _sum2 = _mm256_fmadd_ps(_val0, _w2, _sum2); + _sum3 = _mm256_fmadd_ps(_val0, _w3, _sum3); + __m256 _w4 = _mm256_broadcast_ss(kptr + 4); + __m256 _w5 = _mm256_broadcast_ss(kptr + 5); + _sum4 = _mm256_fmadd_ps(_val0, _w4, _sum4); + _sum5 = _mm256_fmadd_ps(_val0, _w5, _sum5); + __m256 _w6 = _mm256_broadcast_ss(kptr + 6); + __m256 _w7 = _mm256_broadcast_ss(kptr + 7); + _sum6 = _mm256_fmadd_ps(_val0, _w6, _sum6); + _sum7 = _mm256_fmadd_ps(_val0, _w7, _sum7); + + tmpptr += 8; + kptr += 8; + } + + _mm256_storeu_ps(outptr0, _sum0); + _mm256_storeu_ps(outptr1, _sum1); + _mm256_storeu_ps(outptr2, _sum2); + _mm256_storeu_ps(outptr3, _sum3); + _mm256_storeu_ps(outptr4, _sum4); + _mm256_storeu_ps(outptr5, _sum5); + _mm256_storeu_ps(outptr6, _sum6); + _mm256_storeu_ps(outptr7, _sum7); + + outptr0 += 
8; + outptr1 += 8; + outptr2 += 8; + outptr3 += 8; + outptr4 += 8; + outptr5 += 8; + outptr6 += 8; + outptr7 += 8; + } + for (; i < size; i++) + { + const float* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + i % 8); + const float* kptr = kernel.channel(p / 8); + + int nn = inch * maxk * 16; // inch always > 0 + + __m256 _sum = _mm256_loadu_ps(biasptr); + + for (int j = 0; j < nn; j++) + { + __m256 _val0 = _mm256_broadcast_ss(tmpptr); + __m256 _w0 = _mm256_load_ps(kptr); + _sum = _mm256_fmadd_ps(_val0, _w0, _sum); + + tmpptr += 1; + kptr += 8; + } + + float sum[8]; + _mm256_storeu_ps(sum, _sum); + + outptr0[0] = sum[0]; + outptr1[0] = sum[1]; + outptr2[0] = sum[2]; + outptr3[0] = sum[3]; + outptr4[0] = sum[4]; + outptr5[0] = sum[5]; + outptr6[0] = sum[6]; + outptr7[0] = sum[7]; + + outptr0 += 1; + outptr1 += 1; + outptr2 += 1; + outptr3 += 1; + outptr4 += 1; + outptr5 += 1; + outptr6 += 1; + outptr7 += 1; + } + } + + #pragma omp parallel for num_threads(opt.num_threads) + for (int p = remain_outch_start; p < outch; p++) + { + float* outptr0 = top_blob.channel(p); + + const float bias0 = bias ? bias[p] : 0.f; + + int i = 0; + for (; i + 15 < size; i += 16) + { + const float* tmpptr = tmp.channel(i / 16); + const float* kptr = kernel.channel(p / 8 + p % 8); + + int nn = inch * maxk * 16; // inch always > 0 + + __m512 _sum0 = _mm512_set1_ps(bias0); + + for (int j = 0; j < nn; j++) + { + __m512 _val0 = _mm512_load_ps(tmpptr); + __m512 _w0 = _mm512_set1_ps(kptr[0]); + _sum0 = _mm512_fmadd_ps(_w0, _val0, _sum0); + + tmpptr += 16; + kptr += 1; + } + + _mm512_storeu_ps(outptr0, _sum0); + outptr0 += 16; + } + for (; i + 7 < size; i += 8) + { + const float* tmpptr = tmp.channel(i / 16 + (i % 16) / 8); + const float* kptr = kernel.channel(p / 8 + p % 8); + + int nn = inch * maxk * 16; // inch always > 0 + + __m256 _sum0 = _mm256_set1_ps(bias0); + + for (int j = 0; j < nn; j++) + { + __m256 _val0 = _mm256_load_ps(tmpptr); + __m256 _w0 = _mm256_broadcast_ss(kptr); + _sum0 = _mm256_fmadd_ps(_w0, _val0, _sum0); + + tmpptr += 8; + kptr += 1; + } + + _mm256_storeu_ps(outptr0, _sum0); + outptr0 += 8; + } + for (; i < size; i++) + { + const float* tmpptr = tmp.channel(i / 16 + (i % 16) / 8 + i % 8); + const float* kptr = kernel.channel(p / 8 + p % 8); + + int nn = inch * maxk; // inch always > 0 + + float sum0 = bias0; + + __m512 _sum0 = _mm512_setzero_ps(); + + for (int j = 0; j < nn; j++) + { + __m512 _val0 = _mm512_load_ps(tmpptr); + __m512 _w0 = _mm512_load_ps(kptr); + _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0); + + tmpptr += 16; + kptr += 16; + } + + sum0 += _mm512_reduce_add_ps(_sum0); + + outptr0[0] = sum0; + outptr0 += 1; + } + } +} + +static void convolution_im2col_sgemm_transform_kernel_pack16to1_avx512(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h) +{ + const int maxk = kernel_w * kernel_h; + + // interleave + // src = maxk-inch-outch + // dst = pb-pa-maxk-inch/pa-outch/pb + Mat kernel = _kernel.reshape(maxk, inch, outch); + kernel_tm.create(8 * 16 * maxk, inch / 16, outch / 8 + outch % 8); + + int q = 0; + for (; q + 7 < outch; q += 8) + { + float* g00 = kernel_tm.channel(q / 8); + + for (int p = 0; p + 15 < inch; p += 16) + { + for (int k = 0; k < maxk; k++) + { + for (int i = 0; i < 16; i++) + { + for (int j = 0; j < 8; j++) + { + const float* k00 = kernel.channel(q + j).row(p + i); + g00[0] = k00[k]; + g00++; + } + } + } + } + } + for (; q < outch; q++) + { + const Mat k0 = kernel.channel(q); + + float* g00 = kernel_tm.channel(q / 8 + q % 8); + + for (int p 
= 0; p + 15 < inch; p += 16) + { + for (int k = 0; k < maxk; k++) + { + for (int i = 0; i < 16; i++) + { + const float* k00 = k0.row(p + i); + g00[0] = k00[k]; + g00++; + } + } + } + } +} + +static void convolution_im2col_sgemm_pack16to1_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) +{ + int w = bottom_blob.w; + int inch = bottom_blob.c; + + int outw = top_blob.w; + int outh = top_blob.h; + const int size = outw * outh; + + const int maxk = kernel_w * kernel_h; + + // im2col + Mat bottom_im2col(size, maxk, inch, 64u, 16, opt.workspace_allocator); + { + const int gap = (w * stride_h - outw * stride_w) * 16; + + #pragma omp parallel for num_threads(opt.num_threads) + for (int p = 0; p < inch; p++) + { + const Mat img = bottom_blob.channel(p); + float* ptr = bottom_im2col.channel(p); + + for (int u = 0; u < kernel_h; u++) + { + for (int v = 0; v < kernel_w; v++) + { + const float* sptr = img.row(dilation_h * u) + dilation_w * v * 16; + + for (int i = 0; i < outh; i++) + { + int j = 0; + for (; j < outw; j++) + { + __m512 _val = _mm512_load_ps(sptr); + _mm512_store_ps(ptr, _val); + + sptr += stride_w * 16; + ptr += 16; + } + + sptr += gap; + } + } + } + } + } + + im2col_sgemm_pack16to1_avx512(bottom_im2col, top_blob, kernel, _bias, opt); +} diff --git a/src/layer/x86/convolution_sgemm_pack16to4.h b/src/layer/x86/convolution_sgemm_pack16to4.h index a96996589b3..1930128c3d2 100644 --- a/src/layer/x86/convolution_sgemm_pack16to4.h +++ b/src/layer/x86/convolution_sgemm_pack16to4.h @@ -404,20 +404,20 @@ static void im2col_sgemm_pack16to4_avx512(const Mat& bottom_im2col, Mat& top_blo __m256 _val0 = _mm256_broadcast_ss(tmpptr); __m256 _val1 = _mm256_broadcast_ss(tmpptr + 1); - _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0); - _sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1); + _sum0 = _mm256_fmadd_ps(_val0, _w0, _sum0); + _sum1 = _mm256_fmadd_ps(_val1, _w0, _sum1); __m256 _val2 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val3 = _mm256_broadcast_ss(tmpptr + 3); - _sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2); - _sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3); + _sum2 = _mm256_fmadd_ps(_val2, _w0, _sum2); + _sum3 = _mm256_fmadd_ps(_val3, _w0, _sum3); __m256 _val4 = _mm256_broadcast_ss(tmpptr + 4); __m256 _val5 = _mm256_broadcast_ss(tmpptr + 5); - _sum4 = _mm256_comp_fmadd_ps(_val4, _w0, _sum4); - _sum5 = _mm256_comp_fmadd_ps(_val5, _w0, _sum5); + _sum4 = _mm256_fmadd_ps(_val4, _w0, _sum4); + _sum5 = _mm256_fmadd_ps(_val5, _w0, _sum5); __m256 _val6 = _mm256_broadcast_ss(tmpptr + 6); __m256 _val7 = _mm256_broadcast_ss(tmpptr + 7); - _sum6 = _mm256_comp_fmadd_ps(_val6, _w0, _sum6); - _sum7 = _mm256_comp_fmadd_ps(_val7, _w0, _sum7); + _sum6 = _mm256_fmadd_ps(_val6, _w0, _sum6); + _sum7 = _mm256_fmadd_ps(_val7, _w0, _sum7); tmpptr += 8; kptr += 8; @@ -461,12 +461,12 @@ static void im2col_sgemm_pack16to4_avx512(const Mat& bottom_im2col, Mat& top_blo __m256 _val0 = _mm256_broadcast_ss(tmpptr); __m256 _val1 = _mm256_broadcast_ss(tmpptr + 1); - _sum0 = _mm256_comp_fmadd_ps(_val0, _w0, _sum0); - _sum1 = _mm256_comp_fmadd_ps(_val1, _w0, _sum1); + _sum0 = _mm256_fmadd_ps(_val0, _w0, _sum0); + _sum1 = _mm256_fmadd_ps(_val1, _w0, _sum1); __m256 _val2 = _mm256_broadcast_ss(tmpptr + 2); __m256 _val3 = _mm256_broadcast_ss(tmpptr + 3); - _sum2 = _mm256_comp_fmadd_ps(_val2, _w0, _sum2); - _sum3 = _mm256_comp_fmadd_ps(_val3, _w0, _sum3); + _sum2 = _mm256_fmadd_ps(_val2, _w0, 
_sum2); + _sum3 = _mm256_fmadd_ps(_val3, _w0, _sum3); tmpptr += 4; kptr += 8; @@ -497,7 +497,7 @@ static void im2col_sgemm_pack16to4_avx512(const Mat& bottom_im2col, Mat& top_blo { __m256 _w0 = _mm256_load_ps(kptr); __m256 _val0 = _mm256_broadcast_ss(tmpptr); - _sum = _mm256_comp_fmadd_ps(_val0, _w0, _sum); + _sum = _mm256_fmadd_ps(_val0, _w0, _sum); tmpptr += 1; kptr += 8; diff --git a/src/layer/x86/convolution_sgemm_pack4to16.h b/src/layer/x86/convolution_sgemm_pack4to16.h new file mode 100644 index 00000000000..879f9e48dd6 --- /dev/null +++ b/src/layer/x86/convolution_sgemm_pack4to16.h @@ -0,0 +1,317 @@ +// Tencent is pleased to support the open source community by making ncnn available. +// +// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. +// +// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except +// in compliance with the License. You may obtain a copy of the License at +// +// https://opensource.org/licenses/BSD-3-Clause +// +// Unless required by applicable law or agreed to in writing, software distributed +// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +// CONDITIONS OF ANY KIND, either express or implied. See the License for the +// specific language governing permissions and limitations under the License. + +static void im2col_sgemm_pack4to16_avx512(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) +{ + // Mat bottom_im2col(size, maxk, inch, 16u, 4, opt.workspace_allocator); + + const int size = bottom_im2col.w; + const int maxk = bottom_im2col.h; + const int inch = bottom_im2col.c; + + const int outch = top_blob.c; + + const float* bias = _bias; + + // permute + Mat tmp; + if (size >= 16) + tmp.create(16 * maxk, inch, size / 16 + size % 16, 16u, 4, opt.workspace_allocator); + else + tmp.create(maxk, inch, size, 16u, 4, opt.workspace_allocator); + { + int nn_size = size >> 4; + int remain_size_start = 0; + + #pragma omp parallel for num_threads(opt.num_threads) + for (int ii = 0; ii < nn_size; ii++) + { + int i = remain_size_start + ii * 16; + + float* tmpptr = tmp.channel(i / 16); + + for (int q = 0; q < inch; q++) + { + const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; + + for (int k = 0; k < maxk; k++) + { + // transpose 4x16 + __m128 _r0 = _mm_load_ps(img0); + __m128 _r1 = _mm_load_ps(img0 + 4); + __m128 _r2 = _mm_load_ps(img0 + 4 * 2); + __m128 _r3 = _mm_load_ps(img0 + 4 * 3); + __m128 _r4 = _mm_load_ps(img0 + 4 * 4); + __m128 _r5 = _mm_load_ps(img0 + 4 * 5); + __m128 _r6 = _mm_load_ps(img0 + 4 * 6); + __m128 _r7 = _mm_load_ps(img0 + 4 * 7); + __m128 _r8 = _mm_load_ps(img0 + 4 * 8); + __m128 _r9 = _mm_load_ps(img0 + 4 * 9); + __m128 _ra = _mm_load_ps(img0 + 4 * 10); + __m128 _rb = _mm_load_ps(img0 + 4 * 11); + __m128 _rc = _mm_load_ps(img0 + 4 * 12); + __m128 _rd = _mm_load_ps(img0 + 4 * 13); + __m128 _re = _mm_load_ps(img0 + 4 * 14); + __m128 _rf = _mm_load_ps(img0 + 4 * 15); + + _MM_TRANSPOSE4_PS(_r0, _r1, _r2, _r3); + _MM_TRANSPOSE4_PS(_r4, _r5, _r6, _r7); + _MM_TRANSPOSE4_PS(_r8, _r9, _ra, _rb); + _MM_TRANSPOSE4_PS(_rc, _rd, _re, _rf); + + _mm_store_ps(tmpptr, _r0); + _mm_store_ps(tmpptr + 4, _r4); + _mm_store_ps(tmpptr + 4 * 2, _r8); + _mm_store_ps(tmpptr + 4 * 3, _rc); + _mm_store_ps(tmpptr + 4 * 4, _r1); + _mm_store_ps(tmpptr + 4 * 5, _r5); + _mm_store_ps(tmpptr + 4 * 6, _r9); + _mm_store_ps(tmpptr + 4 * 7, _rd); + _mm_store_ps(tmpptr + 4 * 8, _r2); + _mm_store_ps(tmpptr + 4 * 9, _r6); + 
_mm_store_ps(tmpptr + 4 * 10, _ra); + _mm_store_ps(tmpptr + 4 * 11, _re); + _mm_store_ps(tmpptr + 4 * 12, _r3); + _mm_store_ps(tmpptr + 4 * 13, _r7); + _mm_store_ps(tmpptr + 4 * 14, _rb); + _mm_store_ps(tmpptr + 4 * 15, _rf); + + img0 += size * 4; + tmpptr += 64; + } + } + } + + remain_size_start += nn_size << 4; + + #pragma omp parallel for num_threads(opt.num_threads) + for (int i = remain_size_start; i < size; i++) + { + float* tmpptr = tmp.channel(i / 16 + i % 16); + + for (int q = 0; q < inch; q++) + { + const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; + + for (int k = 0; k < maxk; k++) + { + __m128 _val = _mm_load_ps(img0); + _mm_store_ps(tmpptr, _val); + + img0 += size * 4; + tmpptr += 4; + } + } + } + } + + #pragma omp parallel for num_threads(opt.num_threads) + for (int p = 0; p < outch; p++) + { + float* outptr0 = top_blob.channel(p); + + const float zeros[16] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; + const float* biasptr = bias ? bias + p * 16 : zeros; + + int i = 0; + for (; i + 15 < size; i += 16) + { + float* tmpptr = tmp.channel(i / 16); + const float* kptr = kernel.channel(p); + + int nn = inch * maxk * 4; // inch always > 0 + + __m512 _sum0 = _mm512_loadu_ps(biasptr); + __m512 _sum1 = _sum0; + __m512 _sum2 = _sum0; + __m512 _sum3 = _sum0; + __m512 _sum4 = _sum0; + __m512 _sum5 = _sum0; + __m512 _sum6 = _sum0; + __m512 _sum7 = _sum0; + __m512 _sum8 = _sum0; + __m512 _sum9 = _sum0; + __m512 _suma = _sum0; + __m512 _sumb = _sum0; + __m512 _sumc = _sum0; + __m512 _sumd = _sum0; + __m512 _sume = _sum0; + __m512 _sumf = _sum0; + + for (int j = 0; j < nn; j++) + { + __m512 _w0 = _mm512_load_ps(kptr); + + __m512 _val0 = _mm512_set1_ps(tmpptr[0]); + __m512 _val1 = _mm512_set1_ps(tmpptr[1]); + _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0); + _sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1); + __m512 _val2 = _mm512_set1_ps(tmpptr[2]); + __m512 _val3 = _mm512_set1_ps(tmpptr[3]); + _sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2); + _sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3); + __m512 _val4 = _mm512_set1_ps(tmpptr[4]); + __m512 _val5 = _mm512_set1_ps(tmpptr[5]); + _sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4); + _sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5); + __m512 _val6 = _mm512_set1_ps(tmpptr[6]); + __m512 _val7 = _mm512_set1_ps(tmpptr[7]); + _sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6); + _sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7); + __m512 _val8 = _mm512_set1_ps(tmpptr[8]); + __m512 _val9 = _mm512_set1_ps(tmpptr[9]); + _sum8 = _mm512_fmadd_ps(_val8, _w0, _sum8); + _sum9 = _mm512_fmadd_ps(_val9, _w0, _sum9); + __m512 _vala = _mm512_set1_ps(tmpptr[10]); + __m512 _valb = _mm512_set1_ps(tmpptr[11]); + _suma = _mm512_fmadd_ps(_vala, _w0, _suma); + _sumb = _mm512_fmadd_ps(_valb, _w0, _sumb); + __m512 _valc = _mm512_set1_ps(tmpptr[12]); + __m512 _vald = _mm512_set1_ps(tmpptr[13]); + _sumc = _mm512_fmadd_ps(_valc, _w0, _sumc); + _sumd = _mm512_fmadd_ps(_vald, _w0, _sumd); + __m512 _vale = _mm512_set1_ps(tmpptr[14]); + __m512 _valf = _mm512_set1_ps(tmpptr[15]); + _sume = _mm512_fmadd_ps(_vale, _w0, _sume); + _sumf = _mm512_fmadd_ps(_valf, _w0, _sumf); + + kptr += 16; + tmpptr += 16; + } + + _mm512_store_ps(outptr0, _sum0); + _mm512_store_ps(outptr0 + 16, _sum1); + _mm512_store_ps(outptr0 + 16 * 2, _sum2); + _mm512_store_ps(outptr0 + 16 * 3, _sum3); + _mm512_store_ps(outptr0 + 16 * 4, _sum4); + _mm512_store_ps(outptr0 + 16 * 5, _sum5); + _mm512_store_ps(outptr0 + 16 * 6, _sum6); + _mm512_store_ps(outptr0 + 16 * 7, _sum7); + _mm512_store_ps(outptr0 + 
16 * 8, _sum8); + _mm512_store_ps(outptr0 + 16 * 9, _sum9); + _mm512_store_ps(outptr0 + 16 * 10, _suma); + _mm512_store_ps(outptr0 + 16 * 11, _sumb); + _mm512_store_ps(outptr0 + 16 * 12, _sumc); + _mm512_store_ps(outptr0 + 16 * 13, _sumd); + _mm512_store_ps(outptr0 + 16 * 14, _sume); + _mm512_store_ps(outptr0 + 16 * 15, _sumf); + + outptr0 += 16 * 16; + } + for (; i < size; i++) + { + float* tmpptr = tmp.channel(i / 16 + i % 16); + const float* kptr = kernel.channel(p); + + int nn = inch * maxk * 4; // inch always > 0 + + __m512 _sum0 = _mm512_loadu_ps(biasptr); + + for (int j = 0; j < nn; j++) + { + __m512 _w0 = _mm512_load_ps(kptr); + __m512 _val0 = _mm512_set1_ps(tmpptr[0]); + _sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0); + + kptr += 16; + tmpptr += 1; + } + + _mm512_store_ps(outptr0, _sum0); + outptr0 += 16; + } + } +} + +static void convolution_im2col_sgemm_transform_kernel_pack4to16_avx512(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h) +{ + const int maxk = kernel_w * kernel_h; + + // interleave + // src = maxk-inch-outch + // dst = 16b-4a-maxk-inch/4a-outch/16b + Mat kernel = _kernel.reshape(maxk, inch, outch); + kernel_tm.create(16 * 4 * maxk, inch / 4, outch / 16, (size_t)4u); + + for (int q = 0; q + 15 < outch; q += 16) + { + float* g00 = kernel_tm.channel(q / 16); + + for (int p = 0; p + 3 < inch; p += 4) + { + for (int k = 0; k < maxk; k++) + { + for (int i = 0; i < 4; i++) + { + for (int j = 0; j < 16; j++) + { + const float* k00 = kernel.channel(q + j).row(p + i); + g00[0] = k00[k]; + g00++; + } + } + } + } + } +} + +static void convolution_im2col_sgemm_pack4to16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) +{ + int w = bottom_blob.w; + int inch = bottom_blob.c; + + int outw = top_blob.w; + int outh = top_blob.h; + const int size = outw * outh; + + const int maxk = kernel_w * kernel_h; + + // im2col + Mat bottom_im2col(size, maxk, inch, 16u, 4, opt.workspace_allocator); + { + const int gap = (w * stride_h - outw * stride_w) * 4; + + #pragma omp parallel for num_threads(opt.num_threads) + for (int p = 0; p < inch; p++) + { + const Mat img = bottom_blob.channel(p); + float* ptr = bottom_im2col.channel(p); + + for (int u = 0; u < kernel_h; u++) + { + for (int v = 0; v < kernel_w; v++) + { + const float* sptr = img.row(dilation_h * u) + dilation_w * v * 4; + + for (int i = 0; i < outh; i++) + { + int j = 0; + for (; j < outw; j++) + { + __m128 _val = _mm_load_ps(sptr); + _mm_store_ps(ptr, _val); + + sptr += stride_w * 4; + ptr += 4; + } + + sptr += gap; + } + } + } + } + } + + im2col_sgemm_pack4to16_avx512(bottom_im2col, top_blob, kernel, _bias, opt); +} diff --git a/src/layer/x86/convolution_x86.cpp b/src/layer/x86/convolution_x86.cpp index a0c71f4831a..50d19b1694e 100644 --- a/src/layer/x86/convolution_x86.cpp +++ b/src/layer/x86/convolution_x86.cpp @@ -111,15 +111,19 @@ namespace ncnn { #include "convolution_sgemm_pack16.h" #include "convolution_sgemm_pack8to16.h" +#include "convolution_sgemm_pack4to16.h" #include "convolution_sgemm_pack1to16.h" #include "convolution_sgemm_pack16to8.h" #include "convolution_sgemm_pack16to4.h" +#include "convolution_sgemm_pack16to1.h" #include "convolution_winograd_transform_pack16.h" #include "convolution_1x1_pack16.h" #include "convolution_1x1_pack8to16.h" +#include "convolution_1x1_pack4to16.h" #include "convolution_1x1_pack1to16.h" #include 
"convolution_1x1_pack16to8.h" #include "convolution_1x1_pack16to4.h" +#include "convolution_1x1_pack16to1.h" #include "convolution_3x3_pack16.h" #endif // __AVX512F__ #endif // __AVX__ @@ -326,6 +330,19 @@ int Convolution_x86::create_pipeline(const Option& opt) if (elempack == 4 && out_elempack == 16) { + if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1) + { + convolution_im2col_sgemm_transform_kernel_pack4to16_avx512(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h); + } + else if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2) + { + convolution_im2col_sgemm_transform_kernel_pack4to16_avx512(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h); + } + else if (opt.use_sgemm_convolution) + { + convolution_im2col_sgemm_transform_kernel_pack4to16_avx512(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h); + } + else { convolution_transform_kernel_packed_sse(weight_data, weight_data_packed, num_input, num_output, kernel_w, kernel_h, elempack, out_elempack); } @@ -373,6 +390,19 @@ int Convolution_x86::create_pipeline(const Option& opt) if (elempack == 16 && out_elempack == 1) { + if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1) + { + convolution_im2col_sgemm_transform_kernel_pack16to1_avx512(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h); + } + else if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2) + { + convolution_im2col_sgemm_transform_kernel_pack16to1_avx512(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h); + } + else if (opt.use_sgemm_convolution) + { + convolution_im2col_sgemm_transform_kernel_pack16to1_avx512(weight_data, weight_sgemm_data, num_input, num_output, kernel_w, kernel_h); + } + else { convolution_transform_kernel_packed_sse(weight_data, weight_data_packed, num_input, num_output, kernel_w, kernel_h, elempack, out_elempack); } @@ -863,6 +893,34 @@ int Convolution_x86::forward(const Mat& bottom_blob, Mat& top_blob, const Option if (elempack == 4 && out_elempack == 16) { + if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1) + { + conv1x1s1_sgemm_pack4to16_avx512(bottom_blob_bordered, top_blob, weight_sgemm_data, bias_data, opt); + + if (activation) + { + activation->forward_inplace(top_blob, opt); + } + } + else if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2) + { + conv1x1s2_sgemm_pack4to16_avx512(bottom_blob_bordered, top_blob, weight_sgemm_data, bias_data, opt); + + if (activation) + { + activation->forward_inplace(top_blob, opt); + } + } + else if (opt.use_sgemm_convolution) + { + convolution_im2col_sgemm_pack4to16_avx512(bottom_blob_bordered, top_blob, weight_sgemm_data, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt); + + if (activation) + { + activation->forward_inplace(top_blob, opt); + } + } + else { convolution_pack4to16_avx512(bottom_blob_bordered, top_blob, weight_data_packed, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt); } @@ -940,6 +998,34 @@ int Convolution_x86::forward(const Mat& bottom_blob, Mat& top_blob, const Option if (elempack == 16 && out_elempack == 1) { + if (kernel_w == 1 && kernel_h == 1 && 
dilation_w == 1 && dilation_h == 1 && stride_w == 1 && stride_h == 1) + { + conv1x1s1_sgemm_pack16to1_avx512(bottom_blob_bordered, top_blob, weight_sgemm_data, bias_data, opt); + + if (activation) + { + activation->forward_inplace(top_blob, opt); + } + } + else if (kernel_w == 1 && kernel_h == 1 && dilation_w == 1 && dilation_h == 1 && stride_w == 2 && stride_h == 2) + { + conv1x1s2_sgemm_pack16to1_avx512(bottom_blob_bordered, top_blob, weight_sgemm_data, bias_data, opt); + + if (activation) + { + activation->forward_inplace(top_blob, opt); + } + } + else if (opt.use_sgemm_convolution) + { + convolution_im2col_sgemm_pack16to1_avx512(bottom_blob_bordered, top_blob, weight_sgemm_data, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, opt); + + if (activation) + { + activation->forward_inplace(top_blob, opt); + } + } + else { convolution_pack16to1_avx512(bottom_blob_bordered, top_blob, weight_data_packed, bias_data, kernel_w, kernel_h, dilation_w, dilation_h, stride_w, stride_h, activation_type, activation_params, opt); } diff --git a/src/layer/x86/eltwise_x86.cpp b/src/layer/x86/eltwise_x86.cpp index 214a3b31d43..10fbbd4affc 100644 --- a/src/layer/x86/eltwise_x86.cpp +++ b/src/layer/x86/eltwise_x86.cpp @@ -38,182 +38,133 @@ int Eltwise_x86::forward(const std::vector& bottom_blobs, std::vector& int h = bottom_blob.h; int channels = bottom_blob.c; int elempack = bottom_blob.elempack; - int size = w * h; + int size = w * h * elempack; Mat& top_blob = top_blobs[0]; top_blob.create_like(bottom_blob, opt.blob_allocator); if (top_blob.empty()) return -100; -#if __SSE2__ -#if __AVX__ -#if __AVX512F__ - if (elempack == 16) + if (op_type == Operation_PROD) { - const size_t bottom_blob_count = bottom_blobs.size(); - std::vector tmp(bottom_blob_count); - for (size_t i = 0; i < bottom_blob_count; i++) + // first blob + const Mat& bottom_blob1 = bottom_blobs[1]; + #pragma omp parallel for num_threads(opt.num_threads) + for (int q = 0; q < channels; q++) { - convert_packing(bottom_blobs[i], tmp[i], 8, opt); - } - - std::vector tmpout(1); - forward(tmp, tmpout, opt); - - convert_packing(tmpout[0], top_blob, 16, opt); + const float* ptr = bottom_blob.channel(q); + const float* ptr1 = bottom_blob1.channel(q); + float* outptr = top_blob.channel(q); - return 0; - } + int i = 0; +#if __SSE2__ +#if __AVX__ +#if __AVX512F__ + for (; i + 15 < size; i += 16) + { + __m512 _p = _mm512_loadu_ps(ptr); + __m512 _p1 = _mm512_loadu_ps(ptr1); + _p = _mm512_mul_ps(_p, _p1); + _mm512_storeu_ps(outptr, _p); + + ptr += 16; + ptr1 += 16; + outptr += 16; + } #endif // __AVX512F__ + for (; i + 7 < size; i += 8) + { + __m256 _p = _mm256_loadu_ps(ptr); + __m256 _p1 = _mm256_loadu_ps(ptr1); + _p = _mm256_mul_ps(_p, _p1); + _mm256_storeu_ps(outptr, _p); + + ptr += 8; + ptr1 += 8; + outptr += 8; + } +#endif // __AVX__ + for (; i + 3 < size; i += 4) + { + __m128 _p = _mm_load_ps(ptr); + __m128 _p1 = _mm_load_ps(ptr1); + _p = _mm_mul_ps(_p, _p1); + _mm_store_ps(outptr, _p); + + ptr += 4; + ptr1 += 4; + outptr += 4; + } +#endif // __SSE2__ + for (; i < size; i++) + { + *outptr = *ptr * *ptr1; - if (elempack == 8) - { - if (op_type == Operation_PROD) + ptr++; + ptr1++; + outptr++; + } + } + + for (size_t b = 2; b < bottom_blobs.size(); b++) { - // first blob - const Mat& bottom_blob1 = bottom_blobs[1]; + const Mat& bottom_blob2 = bottom_blobs[b]; #pragma omp parallel for num_threads(opt.num_threads) for (int q = 0; q < channels; q++) { - const float* ptr = bottom_blob.channel(q); - const float* ptr1 = 
bottom_blob1.channel(q); + const float* ptr = bottom_blob2.channel(q); float* outptr = top_blob.channel(q); - for (int i = 0; i < size; i++) + int i = 0; +#if __SSE2__ +#if __AVX__ +#if __AVX512F__ + for (; i + 15 < size; i += 16) { - __m256 _p = _mm256_loadu_ps(ptr); - __m256 _p1 = _mm256_loadu_ps(ptr1); + __m512 _p = _mm512_loadu_ps(outptr); + __m512 _p1 = _mm512_loadu_ps(ptr); + _p = _mm512_mul_ps(_p, _p1); + _mm512_storeu_ps(outptr, _p); + + ptr += 16; + outptr += 16; + } +#endif // __AVX512F__ + for (; i + 7 < size; i += 8) + { + __m256 _p = _mm256_loadu_ps(outptr); + __m256 _p1 = _mm256_loadu_ps(ptr); _p = _mm256_mul_ps(_p, _p1); _mm256_storeu_ps(outptr, _p); ptr += 8; - ptr1 += 8; outptr += 8; } - } - - for (size_t b = 2; b < bottom_blobs.size(); b++) - { - const Mat& bottom_blob2 = bottom_blobs[b]; - #pragma omp parallel for num_threads(opt.num_threads) - for (int q = 0; q < channels; q++) - { - const float* ptr = bottom_blob2.channel(q); - float* outptr = top_blob.channel(q); - - for (int i = 0; i < size; i++) - { - __m256 _p = _mm256_loadu_ps(outptr); - __m256 _p1 = _mm256_loadu_ps(ptr); - _p = _mm256_mul_ps(_p, _p1); - _mm256_storeu_ps(outptr, _p); - - ptr += 8; - outptr += 8; - } - } - } - } - if (op_type == Operation_SUM) - { - if (coeffs.w == 0) - { - // first blob - const Mat& bottom_blob1 = bottom_blobs[1]; - #pragma omp parallel for num_threads(opt.num_threads) - for (int q = 0; q < channels; q++) +#endif // __AVX__ + for (; i + 3 < size; i += 4) { - const float* ptr = bottom_blob.channel(q); - const float* ptr1 = bottom_blob1.channel(q); - float* outptr = top_blob.channel(q); - - for (int i = 0; i < size; i++) - { - __m256 _p = _mm256_loadu_ps(ptr); - __m256 _p1 = _mm256_loadu_ps(ptr1); - _p = _mm256_add_ps(_p, _p1); - _mm256_storeu_ps(outptr, _p); - - ptr += 8; - ptr1 += 8; - outptr += 8; - } - } + __m128 _p = _mm_load_ps(outptr); + __m128 _p1 = _mm_load_ps(ptr); + _p = _mm_mul_ps(_p, _p1); + _mm_store_ps(outptr, _p); - for (size_t b = 2; b < bottom_blobs.size(); b++) - { - const Mat& bottom_blob2 = bottom_blobs[b]; - #pragma omp parallel for num_threads(opt.num_threads) - for (int q = 0; q < channels; q++) - { - const float* ptr = bottom_blob2.channel(q); - float* outptr = top_blob.channel(q); - - for (int i = 0; i < size; i++) - { - __m256 _p = _mm256_loadu_ps(outptr); - __m256 _p1 = _mm256_loadu_ps(ptr); - _p = _mm256_add_ps(_p, _p1); - _mm256_storeu_ps(outptr, _p); - - ptr += 8; - outptr += 8; - } - } + ptr += 4; + outptr += 4; } - } - else - { - // first blob - const Mat& bottom_blob1 = bottom_blobs[1]; - __m256 _coeff0 = _mm256_set1_ps(coeffs[0]); - __m256 _coeff1 = _mm256_set1_ps(coeffs[1]); - #pragma omp parallel for num_threads(opt.num_threads) - for (int q = 0; q < channels; q++) +#endif // __SSE2__ + for (; i < size; i++) { - const float* ptr = bottom_blob.channel(q); - const float* ptr1 = bottom_blob1.channel(q); - float* outptr = top_blob.channel(q); - - for (int i = 0; i < size; i++) - { - __m256 _p = _mm256_loadu_ps(ptr); - __m256 _p1 = _mm256_loadu_ps(ptr1); - _p = _mm256_mul_ps(_p, _coeff0); - _p = _mm256_comp_fmadd_ps(_p1, _coeff1, _p); - _mm256_storeu_ps(outptr, _p); - - ptr += 8; - ptr1 += 8; - outptr += 8; - } - } + *outptr *= *ptr; - for (size_t b = 2; b < bottom_blobs.size(); b++) - { - const Mat& bottom_blob2 = bottom_blobs[b]; - __m256 _coeff = _mm256_set1_ps(coeffs[b]); - #pragma omp parallel for num_threads(opt.num_threads) - for (int q = 0; q < channels; q++) - { - const float* ptr = bottom_blob2.channel(q); - float* outptr = 
top_blob.channel(q); - - for (int i = 0; i < size; i++) - { - __m256 _p = _mm256_loadu_ps(outptr); - __m256 _p1 = _mm256_loadu_ps(ptr); - _p = _mm256_comp_fmadd_ps(_p1, _coeff, _p); - _mm256_storeu_ps(outptr, _p); - - ptr += 8; - outptr += 8; - } - } + ptr++; + outptr++; } } } - if (op_type == Operation_MAX) + } + if (op_type == Operation_SUM) + { + if (coeffs.w == 0) { // first blob const Mat& bottom_blob1 = bottom_blobs[1]; @@ -224,70 +175,54 @@ int Eltwise_x86::forward(const std::vector& bottom_blobs, std::vector& const float* ptr1 = bottom_blob1.channel(q); float* outptr = top_blob.channel(q); - for (int i = 0; i < size; i++) + int i = 0; +#if __SSE2__ +#if __AVX__ +#if __AVX512F__ + for (; i + 15 < size; i += 16) + { + __m512 _p = _mm512_loadu_ps(ptr); + __m512 _p1 = _mm512_loadu_ps(ptr1); + _p = _mm512_add_ps(_p, _p1); + _mm512_storeu_ps(outptr, _p); + + ptr += 16; + ptr1 += 16; + outptr += 16; + } +#endif // __AVX512F__ + for (; i + 7 < size; i += 8) { __m256 _p = _mm256_loadu_ps(ptr); __m256 _p1 = _mm256_loadu_ps(ptr1); - _p = _mm256_max_ps(_p, _p1); + _p = _mm256_add_ps(_p, _p1); _mm256_storeu_ps(outptr, _p); ptr += 8; ptr1 += 8; outptr += 8; } - } - - for (size_t b = 2; b < bottom_blobs.size(); b++) - { - const Mat& bottom_blob2 = bottom_blobs[b]; - #pragma omp parallel for num_threads(opt.num_threads) - for (int q = 0; q < channels; q++) - { - const float* ptr = bottom_blob2.channel(q); - float* outptr = top_blob.channel(q); - - for (int i = 0; i < size; i++) - { - __m256 _p = _mm256_loadu_ps(outptr); - __m256 _p1 = _mm256_loadu_ps(ptr); - _p = _mm256_max_ps(_p, _p1); - _mm256_storeu_ps(outptr, _p); - - ptr += 8; - outptr += 8; - } - } - } - } - - return 0; - } #endif // __AVX__ - - if (elempack == 4) - { - if (op_type == Operation_PROD) - { - // first blob - const Mat& bottom_blob1 = bottom_blobs[1]; - #pragma omp parallel for num_threads(opt.num_threads) - for (int q = 0; q < channels; q++) - { - const float* ptr = bottom_blob.channel(q); - const float* ptr1 = bottom_blob1.channel(q); - float* outptr = top_blob.channel(q); - - for (int i = 0; i < size; i++) + for (; i + 3 < size; i += 4) { __m128 _p = _mm_load_ps(ptr); __m128 _p1 = _mm_load_ps(ptr1); - _p = _mm_mul_ps(_p, _p1); + _p = _mm_add_ps(_p, _p1); _mm_store_ps(outptr, _p); ptr += 4; ptr1 += 4; outptr += 4; } +#endif // __SSE2__ + for (; i < size; i++) + { + *outptr = *ptr + *ptr1; + + ptr++; + ptr1++; + outptr++; + } } for (size_t b = 2; b < bottom_blobs.size(); b++) @@ -299,121 +234,54 @@ int Eltwise_x86::forward(const std::vector& bottom_blobs, std::vector& const float* ptr = bottom_blob2.channel(q); float* outptr = top_blob.channel(q); - for (int i = 0; i < size; i++) + int i = 0; +#if __SSE2__ +#if __AVX__ +#if __AVX512F__ + for (; i + 15 < size; i += 16) { - __m128 _p = _mm_load_ps(outptr); - __m128 _p1 = _mm_load_ps(ptr); - _p = _mm_mul_ps(_p, _p1); - _mm_store_ps(outptr, _p); + __m512 _p = _mm512_loadu_ps(outptr); + __m512 _p1 = _mm512_loadu_ps(ptr); + _p = _mm512_add_ps(_p, _p1); + _mm512_storeu_ps(outptr, _p); - ptr += 4; - outptr += 4; + ptr += 16; + outptr += 16; } - } - } - } - if (op_type == Operation_SUM) - { - if (coeffs.w == 0) - { - // first blob - const Mat& bottom_blob1 = bottom_blobs[1]; - #pragma omp parallel for num_threads(opt.num_threads) - for (int q = 0; q < channels; q++) - { - const float* ptr = bottom_blob.channel(q); - const float* ptr1 = bottom_blob1.channel(q); - float* outptr = top_blob.channel(q); - - for (int i = 0; i < size; i++) +#endif // __AVX512F__ + for (; i + 7 < size; i += 8) 
{ - __m128 _p = _mm_load_ps(ptr); - __m128 _p1 = _mm_load_ps(ptr1); - _p = _mm_add_ps(_p, _p1); - _mm_store_ps(outptr, _p); - - ptr += 4; - ptr1 += 4; - outptr += 4; - } - } + __m256 _p = _mm256_loadu_ps(outptr); + __m256 _p1 = _mm256_loadu_ps(ptr); + _p = _mm256_add_ps(_p, _p1); + _mm256_storeu_ps(outptr, _p); - for (size_t b = 2; b < bottom_blobs.size(); b++) - { - const Mat& bottom_blob2 = bottom_blobs[b]; - #pragma omp parallel for num_threads(opt.num_threads) - for (int q = 0; q < channels; q++) - { - const float* ptr = bottom_blob2.channel(q); - float* outptr = top_blob.channel(q); - - for (int i = 0; i < size; i++) - { - __m128 _p = _mm_load_ps(outptr); - __m128 _p1 = _mm_load_ps(ptr); - _p = _mm_add_ps(_p, _p1); - _mm_store_ps(outptr, _p); - - ptr += 4; - outptr += 4; - } + ptr += 8; + outptr += 8; } - } - } - else - { - // first blob - const Mat& bottom_blob1 = bottom_blobs[1]; - __m128 _coeff0 = _mm_set1_ps(coeffs[0]); - __m128 _coeff1 = _mm_set1_ps(coeffs[1]); - #pragma omp parallel for num_threads(opt.num_threads) - for (int q = 0; q < channels; q++) - { - const float* ptr = bottom_blob.channel(q); - const float* ptr1 = bottom_blob1.channel(q); - float* outptr = top_blob.channel(q); - - for (int i = 0; i < size; i++) +#endif // __AVX__ + for (; i + 3 < size; i += 4) { - __m128 _p = _mm_load_ps(ptr); - __m128 _p1 = _mm_load_ps(ptr1); - _p = _mm_mul_ps(_p, _coeff0); - _p1 = _mm_mul_ps(_p1, _coeff1); - _p = _mm_add_ps(_p1, _p); + __m128 _p = _mm_load_ps(outptr); + __m128 _p1 = _mm_load_ps(ptr); + _p = _mm_add_ps(_p, _p1); _mm_store_ps(outptr, _p); ptr += 4; - ptr1 += 4; outptr += 4; } - } - - for (size_t b = 2; b < bottom_blobs.size(); b++) - { - const Mat& bottom_blob2 = bottom_blobs[b]; - __m128 _coeff = _mm_set1_ps(coeffs[b]); - #pragma omp parallel for num_threads(opt.num_threads) - for (int q = 0; q < channels; q++) +#endif // __SSE2__ + for (; i < size; i++) { - const float* ptr = bottom_blob2.channel(q); - float* outptr = top_blob.channel(q); - - for (int i = 0; i < size; i++) - { - __m128 _p1 = _mm_load_ps(ptr); - __m128 _p = _mm_load_ps(outptr); - _p1 = _mm_mul_ps(_p1, _coeff); - _p = _mm_add_ps(_p1, _p); - _mm_store_ps(outptr, _p); - - ptr += 4; - outptr += 4; - } + *outptr += *ptr; + + ptr++; + outptr++; } } } } - if (op_type == Operation_MAX) + else { // first blob const Mat& bottom_blob1 = bottom_blobs[1]; @@ -424,104 +292,62 @@ int Eltwise_x86::forward(const std::vector& bottom_blobs, std::vector& const float* ptr1 = bottom_blob1.channel(q); float* outptr = top_blob.channel(q); - for (int i = 0; i < size; i++) + const float coeff0 = coeffs[0]; + const float coeff1 = coeffs[1]; + + int i = 0; +#if __SSE2__ +#if __AVX__ +#if __AVX512F__ + __m512 _coeff0_avx512 = _mm512_set1_ps(coeff0); + __m512 _coeff1_avx512 = _mm512_set1_ps(coeff1); + for (; i + 15 < size; i += 16) + { + __m512 _p = _mm512_loadu_ps(ptr); + __m512 _p1 = _mm512_loadu_ps(ptr1); + _p = _mm512_mul_ps(_p, _coeff0_avx512); + _p = _mm512_fmadd_ps(_p1, _coeff1_avx512, _p); + _mm512_storeu_ps(outptr, _p); + + ptr += 16; + ptr1 += 16; + outptr += 16; + } +#endif // __AVX512F__ + __m256 _coeff0_avx = _mm256_set1_ps(coeff0); + __m256 _coeff1_avx = _mm256_set1_ps(coeff1); + for (; i + 7 < size; i += 8) + { + __m256 _p = _mm256_loadu_ps(ptr); + __m256 _p1 = _mm256_loadu_ps(ptr1); + _p = _mm256_mul_ps(_p, _coeff0_avx); + _p = _mm256_comp_fmadd_ps(_p1, _coeff1_avx, _p); + _mm256_storeu_ps(outptr, _p); + + ptr += 8; + ptr1 += 8; + outptr += 8; + } +#endif // __AVX__ + __m128 _coeff0 = _mm_set1_ps(coeff0); + __m128 
_coeff1 = _mm_set1_ps(coeff1); + for (; i + 3 < size; i += 4) { __m128 _p = _mm_load_ps(ptr); __m128 _p1 = _mm_load_ps(ptr1); - _p = _mm_max_ps(_p, _p1); + _p = _mm_mul_ps(_p, _coeff0); + _p1 = _mm_mul_ps(_p1, _coeff1); + _p = _mm_add_ps(_p1, _p); _mm_store_ps(outptr, _p); ptr += 4; ptr1 += 4; outptr += 4; } - } - - for (size_t b = 2; b < bottom_blobs.size(); b++) - { - const Mat& bottom_blob2 = bottom_blobs[b]; - #pragma omp parallel for num_threads(opt.num_threads) - for (int q = 0; q < channels; q++) - { - const float* ptr = bottom_blob2.channel(q); - float* outptr = top_blob.channel(q); - - for (int i = 0; i < size; i++) - { - __m128 _p = _mm_load_ps(outptr); - __m128 _p1 = _mm_load_ps(ptr); - _p = _mm_max_ps(_p, _p1); - _mm_store_ps(outptr, _p); - - ptr += 4; - outptr += 4; - } - } - } - } - - return 0; - } #endif // __SSE2__ - - if (op_type == Operation_PROD) - { - // first blob - const Mat& bottom_blob1 = bottom_blobs[1]; - #pragma omp parallel for num_threads(opt.num_threads) - for (int q = 0; q < channels; q++) - { - const float* ptr = bottom_blob.channel(q); - const float* ptr1 = bottom_blob1.channel(q); - float* outptr = top_blob.channel(q); - int remain = size; - for (; remain > 0; remain--) - { - *outptr = *ptr * *ptr1; - - ptr++; - ptr1++; - outptr++; - } - } - - for (size_t b = 2; b < bottom_blobs.size(); b++) - { - const Mat& bottom_blob2 = bottom_blobs[b]; - #pragma omp parallel for num_threads(opt.num_threads) - for (int q = 0; q < channels; q++) - { - const float* ptr = bottom_blob2.channel(q); - float* outptr = top_blob.channel(q); - int remain = size; - - for (; remain > 0; remain--) - { - *outptr *= *ptr; - - ptr++; - outptr++; - } - } - } - } - if (op_type == Operation_SUM) - { - if (coeffs.w == 0) - { - // first blob - const Mat& bottom_blob1 = bottom_blobs[1]; - #pragma omp parallel for num_threads(opt.num_threads) - for (int q = 0; q < channels; q++) - { - const float* ptr = bottom_blob.channel(q); - const float* ptr1 = bottom_blob1.channel(q); - float* outptr = top_blob.channel(q); - int remain = size; - - for (; remain > 0; remain--) + for (; i < size; i++) { - *outptr = *ptr + *ptr1; + *outptr = *ptr * coeff0 + *ptr1 * coeff1; ptr++; ptr1++; @@ -538,52 +364,50 @@ int Eltwise_x86::forward(const std::vector& bottom_blobs, std::vector& const float* ptr = bottom_blob2.channel(q); float* outptr = top_blob.channel(q); - int remain = size; - for (; remain > 0; remain--) + const float coeff = coeffs[b]; + + int i = 0; +#if __SSE2__ +#if __AVX__ +#if __AVX512F__ + __m512 _coeff_avx512 = _mm512_set1_ps(coeff); + for (; i + 15 < size; i += 16) { - *outptr += *ptr; + __m512 _p = _mm512_loadu_ps(outptr); + __m512 _p1 = _mm512_loadu_ps(ptr); + _p = _mm512_fmadd_ps(_p1, _coeff_avx512, _p); + _mm512_storeu_ps(outptr, _p); - ptr++; - outptr++; + ptr += 16; + outptr += 16; } - } - } - } - else - { - // first blob - const Mat& bottom_blob1 = bottom_blobs[1]; - float coeff0 = coeffs[0]; - float coeff1 = coeffs[1]; - #pragma omp parallel for num_threads(opt.num_threads) - for (int q = 0; q < channels; q++) - { - const float* ptr = bottom_blob.channel(q); - const float* ptr1 = bottom_blob1.channel(q); - float* outptr = top_blob.channel(q); - int remain = size; - for (; remain > 0; remain--) - { - *outptr = *ptr * coeff0 + *ptr1 * coeff1; - - ptr++; - ptr1++; - outptr++; - } - } +#endif // __AVX512F__ + __m256 _coeff_avx = _mm256_set1_ps(coeff); + for (; i + 7 < size; i += 8) + { + __m256 _p = _mm256_loadu_ps(outptr); + __m256 _p1 = _mm256_loadu_ps(ptr); + _p = 
_mm256_comp_fmadd_ps(_p1, _coeff_avx, _p); + _mm256_storeu_ps(outptr, _p); - for (size_t b = 2; b < bottom_blobs.size(); b++) - { - const Mat& bottom_blob2 = bottom_blobs[b]; - float coeff = coeffs[b]; - #pragma omp parallel for num_threads(opt.num_threads) - for (int q = 0; q < channels; q++) - { - const float* ptr = bottom_blob2.channel(q); - float* outptr = top_blob.channel(q); + ptr += 8; + outptr += 8; + } +#endif // __AVX__ + __m128 _coeff = _mm_set1_ps(coeff); + for (; i + 3 < size; i += 4) + { + __m128 _p1 = _mm_load_ps(ptr); + __m128 _p = _mm_load_ps(outptr); + _p1 = _mm_mul_ps(_p1, _coeff); + _p = _mm_add_ps(_p1, _p); + _mm_store_ps(outptr, _p); - int remain = size; - for (; remain > 0; remain--) + ptr += 4; + outptr += 4; + } +#endif // __SSE2__ + for (; i < size; i++) { *outptr += *ptr * coeff; @@ -605,8 +429,47 @@ int Eltwise_x86::forward(const std::vector& bottom_blobs, std::vector& const float* ptr1 = bottom_blob1.channel(q); float* outptr = top_blob.channel(q); - int remain = size; - for (; remain > 0; remain--) + int i = 0; +#if __SSE2__ +#if __AVX__ +#if __AVX512F__ + for (; i + 15 < size; i += 16) + { + __m512 _p = _mm512_loadu_ps(ptr); + __m512 _p1 = _mm512_loadu_ps(ptr1); + _p = _mm512_max_ps(_p, _p1); + _mm512_storeu_ps(outptr, _p); + + ptr += 16; + ptr1 += 16; + outptr += 16; + } +#endif // __AVX512F__ + for (; i + 7 < size; i += 8) + { + __m256 _p = _mm256_loadu_ps(ptr); + __m256 _p1 = _mm256_loadu_ps(ptr1); + _p = _mm256_max_ps(_p, _p1); + _mm256_storeu_ps(outptr, _p); + + ptr += 8; + ptr1 += 8; + outptr += 8; + } +#endif // __AVX__ + for (; i + 3 < size; i += 4) + { + __m128 _p = _mm_load_ps(ptr); + __m128 _p1 = _mm_load_ps(ptr1); + _p = _mm_max_ps(_p, _p1); + _mm_store_ps(outptr, _p); + + ptr += 4; + ptr1 += 4; + outptr += 4; + } +#endif // __SSE2__ + for (; i < size; i++) { *outptr = std::max(*ptr, *ptr1); @@ -625,8 +488,44 @@ int Eltwise_x86::forward(const std::vector& bottom_blobs, std::vector& const float* ptr = bottom_blob2.channel(q); float* outptr = top_blob.channel(q); - int remain = size; - for (; remain > 0; remain--) + int i = 0; +#if __SSE2__ +#if __AVX__ +#if __AVX512F__ + for (; i + 15 < size; i += 16) + { + __m512 _p = _mm512_loadu_ps(outptr); + __m512 _p1 = _mm512_loadu_ps(ptr); + _p = _mm512_max_ps(_p, _p1); + _mm512_storeu_ps(outptr, _p); + + ptr += 16; + outptr += 16; + } +#endif // __AVX512F__ + for (; i + 7 < size; i += 8) + { + __m256 _p = _mm256_loadu_ps(outptr); + __m256 _p1 = _mm256_loadu_ps(ptr); + _p = _mm256_max_ps(_p, _p1); + _mm256_storeu_ps(outptr, _p); + + ptr += 8; + outptr += 8; + } +#endif // __AVX__ + for (; i + 3 < size; i += 4) + { + __m128 _p = _mm_load_ps(outptr); + __m128 _p1 = _mm_load_ps(ptr); + _p = _mm_max_ps(_p, _p1); + _mm_store_ps(outptr, _p); + + ptr += 4; + outptr += 4; + } +#endif // __SSE2__ + for (; i < size; i++) { *outptr = std::max(*ptr, *outptr);
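Editor's note on the eltwise_x86.cpp rewrite above: the separate elempack == 16 / 8 / 4 / 1 branches are removed because element-wise operations are lane-agnostic, so each channel can be treated as a flat array of w * h * elempack floats and peeled with progressively narrower vectors under the same __AVX512F__ / __AVX__ / __SSE2__ guards, with a scalar tail. Below is a condensed sketch of that loop structure for the Operation_PROD case with two inputs; the function name and flat-pointer interface are illustrative only.

    #include <immintrin.h>

    // Multiply two flat float arrays of `size` elements into `out`,
    // widest vectors first, scalar remainder last.
    static void eltwise_mul_flat(const float* a, const float* b, float* out, int size)
    {
        int i = 0;
    #if __SSE2__
    #if __AVX__
    #if __AVX512F__
        for (; i + 15 < size; i += 16)
            _mm512_storeu_ps(out + i, _mm512_mul_ps(_mm512_loadu_ps(a + i), _mm512_loadu_ps(b + i)));
    #endif // __AVX512F__
        for (; i + 7 < size; i += 8)
            _mm256_storeu_ps(out + i, _mm256_mul_ps(_mm256_loadu_ps(a + i), _mm256_loadu_ps(b + i)));
    #endif // __AVX__
        for (; i + 3 < size; i += 4)
            _mm_storeu_ps(out + i, _mm_mul_ps(_mm_loadu_ps(a + i), _mm_loadu_ps(b + i)));
    #endif // __SSE2__
        for (; i < size; i++)
            out[i] = a[i] * b[i]; // scalar tail
    }

This is why the patch changes `int size = w * h;` to `int size = w * h * elempack;` and why the old elempack == 16 path, which converted to pack8 and recursed, is no longer needed: the same unified loops now cover every packing.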