
optimized sbgemm kernel for neoverse-v1 (sve-256)

Signed-off-by: Ye Tao <ye.tao@arm.com>
tags/v0.3.30
Ye Tao, 10 months ago
parent commit c748e6a338
12 changed files with 789 additions and 987 deletions
  1. CONTRIBUTORS.md  +2 -0
  2. Makefile.arm64  +2 -2
  3. cmake/system.cmake  +2 -2
  4. kernel/arm64/KERNEL.NEOVERSEV1  +8 -4
  5. kernel/arm64/sbgemm_kernel_4x4_neoversev1.c  +3 -3
  6. kernel/arm64/sbgemm_kernel_4x4_neoversev1_impl.c  +414 -0
  7. kernel/arm64/sbgemm_kernel_8x4_neoversev1_impl.c  +0 -472
  8. kernel/arm64/sbgemm_ncopy_4_neoversev1.c  +75 -54
  9. kernel/arm64/sbgemm_ncopy_8_neoversev1.c  +0 -180
  10. kernel/arm64/sbgemm_tcopy_4_neoversev1.c  +280 -67
  11. kernel/arm64/sbgemm_tcopy_8_neoversev1.c  +0 -200
  12. param.h  +3 -3
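
For orientation before the per-file diffs: SBGEMM computes C = alpha * A * B + C with
bfloat16 inputs and fp32 accumulation/output, and this commit retiles the NEOVERSEV1
micro-kernel around 4x4 blocks of C built from svbfmmla calls on 256-bit SVE vectors.
A minimal scalar reference of the operation (helper names are illustrative and not part
of this patch; it assumes plain column-major operands rather than the packed layouts
used by the copy routines below):

    #include <stddef.h>

    /* bf16 -> fp32: a bfloat16 value is the upper 16 bits of an IEEE float */
    static float bf16_to_f32(unsigned short h) {
      union { unsigned int u; float f; } v = { (unsigned int)h << 16 };
      return v.f;
    }

    /* C (m x n, column-major, leading dimension ldc) += alpha * A * B,
       with A (m x k) and B (k x n) stored column-major as bf16 */
    static void sbgemm_ref(size_t m, size_t n, size_t k, float alpha,
                           const unsigned short *A, const unsigned short *B,
                           float *C, size_t ldc) {
      for (size_t j = 0; j < n; j++)
        for (size_t i = 0; i < m; i++) {
          float acc = 0.0f;
          for (size_t p = 0; p < k; p++)
            acc += bf16_to_f32(A[i + p * m]) * bf16_to_f32(B[p + j * k]);
          C[i + j * ldc] += alpha * acc;
        }
    }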

CONTRIBUTORS.md  +2 -0

@@ -240,3 +240,5 @@ In chronological order:
* Marek Michalowski <https://github.com/michalowski-arm>
* [2025-01-21] Add thread throttling profile for SGEMV on `NEOVERSEV1`

* Ye Tao <ye.tao@arm.com>
* [2025-02-03] Optimize SBGEMM kernel on NEOVERSEV1

Makefile.arm64  +2 -2

@@ -101,7 +101,7 @@ ifeq ($(CORE), NEOVERSEV1)
ifeq (1, $(filter 1,$(GCCVERSIONGTEQ7) $(ISCLANG)))
ifeq (1, $(filter 1,$(GCCVERSIONGTEQ10) $(ISCLANG)))
ifeq (1, $(filter 1,$(GCCMINORVERSIONGTEQ4) $(GCCVERSIONGTEQ11) $(ISCLANG)))
CCOMMON_OPT += -march=armv8.4-a+sve
CCOMMON_OPT += -march=armv8.4-a+sve+bf16
ifeq (1, $(ISCLANG))
CCOMMON_OPT += -mtune=cortex-x1
else
@@ -111,7 +111,7 @@ ifneq ($(F_COMPILER), NAG)
FCOMMON_OPT += -march=armv8.4-a -mtune=neoverse-v1
endif
else
CCOMMON_OPT += -march=armv8.4-a+sve
CCOMMON_OPT += -march=armv8.4-a+sve+bf16
ifneq ($(CROSS), 1)
CCOMMON_OPT += -mtune=native
endif


cmake/system.cmake  +2 -2

@@ -291,10 +291,10 @@ if (DEFINED TARGET)

if (${TARGET} STREQUAL NEOVERSEV1)
if (${CMAKE_C_COMPILER_ID} STREQUAL "PGI" AND NOT NO_SVE)
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -Msve_intrinsics -march=armv8.4-a+sve+bf16 -mtune=neoverse-v1")
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -Msve_intrinsics -march=armv8.4-a+sve -mtune=neoverse-v1")
else ()
if (CMAKE_C_COMPILER_VERSION VERSION_GREATER 10.4 OR CMAKE_C_COMPILER_VERSION VERSION_EQUAL 10.4)
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=armv8.4-a+sve+bf16 -mtune=neoverse-v1")
set (KERNEL_DEFINITIONS "${KERNEL_DEFINITIONS} -march=armv8.4-a+sve -mtune=neoverse-v1")
else ()
message(FATAL_ERROR "Compiler ${CMAKE_C_COMPILER} ${CMAKE_C_COMPILER_VERSION} does not support Neoverse V1.")
endif()


kernel/arm64/KERNEL.NEOVERSEV1  +8 -4

@@ -1,14 +1,18 @@
include $(KERNELDIR)/KERNEL.ARMV8SVE

SGEMVTKERNEL = gemv_t_sve.c
DGEMVTKERNEL = gemv_t_sve.c
SGEMVTKERNEL = gemv_t_sve_v1x3.c
DGEMVTKERNEL = gemv_t_sve_v1x3.c
ifeq ($(BUILD_BFLOAT16), 1)
SBGEMM_BETA = sbgemm_beta_neoversev1.c
SBGEMMKERNEL = sbgemm_kernel_$(SBGEMM_UNROLL_M)x$(SBGEMM_UNROLL_N)_neoversev1.c
ifneq ($(SBGEMM_UNROLL_M), $(SBGEMM_UNROLL_N))
SBGEMMINCOPY = sbgemm_ncopy_$(SBGEMM_UNROLL_M)_neoversev1.c
SBGEMMITCOPY = sbgemm_tcopy_$(SBGEMM_UNROLL_M)_neoversev1.c
SBGEMMONCOPY = sbgemm_ncopy_$(SBGEMM_UNROLL_N)_neoversev1.c
SBGEMMOTCOPY = sbgemm_tcopy_$(SBGEMM_UNROLL_N)_neoversev1.c
SBGEMMINCOPYOBJ = sbgemm_incopy$(TSUFFIX).$(SUFFIX)
SBGEMMITCOPYOBJ = sbgemm_itcopy$(TSUFFIX).$(SUFFIX)
endif
SBGEMMONCOPY = sbgemm_ncopy_$(SBGEMM_UNROLL_N)_neoversev1.c
SBGEMMOTCOPY = sbgemm_tcopy_$(SBGEMM_UNROLL_N)_neoversev1.c
SBGEMMONCOPYOBJ = sbgemm_oncopy$(TSUFFIX).$(SUFFIX)
SBGEMMOTCOPYOBJ = sbgemm_otcopy$(TSUFFIX).$(SUFFIX)
endif

kernel/arm64/sbgemm_kernel_8x4_neoversev1.c → kernel/arm64/sbgemm_kernel_4x4_neoversev1.c  +3 -3

@@ -1,5 +1,5 @@
/***************************************************************************
* Copyright (c) 2024, The OpenBLAS Project
* Copyright (c) 2024-2025, The OpenBLAS Project
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -31,9 +31,9 @@
#include "common.h"

#define ALPHA_ONE
#include "sbgemm_kernel_8x4_neoversev1_impl.c"
#include "sbgemm_kernel_4x4_neoversev1_impl.c"
#undef ALPHA_ONE
#include "sbgemm_kernel_8x4_neoversev1_impl.c"
#include "sbgemm_kernel_4x4_neoversev1_impl.c"

int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B,
FLOAT *C, BLASLONG ldc) {

kernel/arm64/sbgemm_kernel_4x4_neoversev1_impl.c  +414 -0

@@ -0,0 +1,414 @@
/***************************************************************************
* Copyright (c) 2024-2025, The OpenBLAS Project
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name of the OpenBLAS project nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* *****************************************************************************/

#include <arm_sve.h>

#include "common.h"

#define INIT_C(M, N) mc##M##N = svdup_f32(0);

#define MATMUL(M, N) mc##M##N = svbfmmla(mc##M##N, ma##M, mb##N);

#define INIT_C_4x4 \
do { \
INIT_C(0, 0); \
INIT_C(0, 1); \
INIT_C(1, 0); \
INIT_C(1, 1); \
} while (0);

#ifdef ALPHA_ONE
#define UPDATE_C(PG, PTR, DST, SRC) \
do { \
DST = svld1_f32((PG), (PTR)); \
DST = svadd_z((PG), SRC, DST); \
svst1_f32((PG), (PTR), DST); \
} while (0);
#else
#define UPDATE_C(PG, PTR, DST, SRC) \
do { \
DST = svld1_f32((PG), (PTR)); \
DST = svmad_z((PG), svalpha, SRC, DST); \
svst1_f32((PG), (PTR), DST); \
} while (0);
#endif

#define ZIP_EVEN_ELEMENTS(PG, mc0, mc1, tmp, vc) \
do { \
(tmp) = svuzp1_f32((mc0), (mc1)); \
(vc) = svcompact_f32((PG), (tmp)); \
} while (0)

#define ZIP_ODD_ELEMENTS(PG, mc0, mc1, tmp, vc) \
do { \
(tmp) = svuzp2_f32((mc0), (mc1)); \
(vc) = svcompact_f32((PG), (tmp)); \
} while (0)

#define ACCUMULATE_LAST4_TO_FIRST4(M, N, TMP) \
do { \
TMP = svext_f32(mc##M##N, mc##M##N, 4); \
mc##M##N = svadd_f32_z(svptrue_b32(), mc##M##N, (TMP)); \
} while (0)

#ifdef ALPHA_ONE
int sbgemm_kernel_neoversev1_alpha_one(BLASLONG m, BLASLONG n, BLASLONG k,
FLOAT alpha, IFLOAT *A, IFLOAT *B,
FLOAT *C, BLASLONG ldc)
#else
int sbgemm_kernel_neoversev1_alpha(BLASLONG m, BLASLONG n, BLASLONG k,
FLOAT alpha, IFLOAT *A, IFLOAT *B, FLOAT *C,
BLASLONG ldc)
#endif
{

BLASLONG pad_k = (k + 7) & ~7;
svbfloat16_t ma0, ma1, mb0, mb1;
svfloat32_t mc00, mc01, mc10, mc11, vc0, vc1, vc2, vc3, oc0, oc1, oc2, oc3;
svfloat32_t tmp;
svfloat32_t svalpha = svdup_f32(alpha);

svbool_t pg16_all = svptrue_b16();

svbool_t pg32_first_1 = svwhilelt_b32(0, 1);
svbool_t pg32_first_2 = svwhilelt_b32(0, 2);
svbool_t pg32_first_4 = svwhilelt_b32(0, 4);

svbool_t pg32_select_first_2_per_quadword = svdupq_b32(1, 1, 0, 0);

bfloat16_t *ptr_a = (bfloat16_t *)A;
bfloat16_t *ptr_b = (bfloat16_t *)B;
FLOAT *ptr_c = C;

bfloat16_t *ptr_a0;
bfloat16_t *ptr_b0;
FLOAT *ptr_c0, *ptr_c1, *ptr_c2, *ptr_c3;

for (BLASLONG j = 0; j < n / 4; j++) {
ptr_c0 = ptr_c;
ptr_c1 = ptr_c0 + ldc;
ptr_c2 = ptr_c1 + ldc;
ptr_c3 = ptr_c2 + ldc;
ptr_c += 4 * ldc;
ptr_a = (bfloat16_t *)A;

for (BLASLONG i = 0; i < m / 4; i++) {
ptr_a0 = ptr_a;
ptr_a += 4 * pad_k;

ptr_b0 = ptr_b;

INIT_C_4x4;

for (BLASLONG p = 0; p < pad_k; p += 8) {
ma0 = svld1_bf16(pg16_all, ptr_a0);
ma1 = svld1_bf16(pg16_all, ptr_a0 + 16);

mb0 = svld1_bf16(pg16_all, ptr_b0);
mb1 = svld1_bf16(pg16_all, ptr_b0 + 16);

MATMUL(0, 0);
MATMUL(0, 1);
MATMUL(1, 0);
MATMUL(1, 1);

ptr_a0 += 32;
ptr_b0 += 32;
}

ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);
ACCUMULATE_LAST4_TO_FIRST4(0, 1, tmp);
ACCUMULATE_LAST4_TO_FIRST4(1, 0, tmp);
ACCUMULATE_LAST4_TO_FIRST4(1, 1, tmp);

ZIP_EVEN_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc0);
ZIP_ODD_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc1);

ZIP_EVEN_ELEMENTS(pg32_select_first_2_per_quadword, mc01, mc11, tmp, vc2);
ZIP_ODD_ELEMENTS(pg32_select_first_2_per_quadword, mc01, mc11, tmp, vc3);

UPDATE_C(pg32_first_4, ptr_c0, oc0, vc0);
UPDATE_C(pg32_first_4, ptr_c1, oc1, vc1);
UPDATE_C(pg32_first_4, ptr_c2, oc2, vc2)
UPDATE_C(pg32_first_4, ptr_c3, oc3, vc3)

ptr_c0 += 4;
ptr_c1 += 4;
ptr_c2 += 4;
ptr_c3 += 4;
}

if (m & 2) {
ptr_a0 = ptr_a;
ptr_a += 2 * pad_k;

ptr_b0 = ptr_b;
INIT_C(0, 0);
INIT_C(0, 1);
for (BLASLONG p = 0; p < pad_k; p += 8) {
ma0 = svld1_bf16(pg16_all, ptr_a0);
mb0 = svld1_bf16(pg16_all, ptr_b0);
mb1 = svld1_bf16(pg16_all, ptr_b0 + 16);

MATMUL(0, 0);
MATMUL(0, 1);

ptr_a0 += 16;
ptr_b0 += 32;
}

ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);
ACCUMULATE_LAST4_TO_FIRST4(0, 1, tmp);

vc0 = svuzp1(mc00, mc00);
vc1 = svuzp2(mc00, mc00);
vc2 = svuzp1(mc01, mc01);
vc3 = svuzp2(mc01, mc01);

UPDATE_C(pg32_first_2, ptr_c0, oc0, vc0);
UPDATE_C(pg32_first_2, ptr_c1, oc1, vc1);
UPDATE_C(pg32_first_2, ptr_c2, oc2, vc2);
UPDATE_C(pg32_first_2, ptr_c3, oc3, vc3);

ptr_c0 += 2;
ptr_c1 += 2;
ptr_c2 += 2;
ptr_c3 += 2;
}

if (m & 1) {
ptr_a0 = ptr_a;
ptr_b0 = ptr_b;

INIT_C(0, 0);
INIT_C(0, 1);
for (BLASLONG p = 0; p < pad_k; p += 8) {
ma0 = svld1_bf16(pg16_all, ptr_a0);
mb0 = svld1_bf16(pg16_all, ptr_b0);
mb1 = svld1_bf16(pg16_all, ptr_b0 + 16);

MATMUL(0, 0);
MATMUL(0, 1);

ptr_a0 += 16;
ptr_b0 += 32;
}

ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);
ACCUMULATE_LAST4_TO_FIRST4(0, 1, tmp);

// using svcompact here would be more straightforward
vc1 = svuzp2(mc00, mc00);
vc3 = svuzp2(mc01, mc01);

UPDATE_C(pg32_first_1, ptr_c0, oc0, mc00);
UPDATE_C(pg32_first_1, ptr_c1, oc1, vc1);
UPDATE_C(pg32_first_1, ptr_c2, oc2, mc01);
UPDATE_C(pg32_first_1, ptr_c3, oc3, vc3);
}

ptr_b += 4 * pad_k;
}

if (n & 2) {
ptr_c0 = ptr_c;
ptr_c1 = ptr_c0 + ldc;
ptr_c += 2 * ldc;
ptr_a = (bfloat16_t *)A;

for (BLASLONG i = 0; i < m / 4; i++) {
ptr_a0 = ptr_a;
ptr_a += 4 * pad_k;

ptr_b0 = ptr_b;

INIT_C(0, 0);
INIT_C(1, 0);

for (BLASLONG p = 0; p < pad_k; p += 8) {
ma0 = svld1_bf16(pg16_all, ptr_a0);
ma1 = svld1_bf16(pg16_all, ptr_a0 + 16);

mb0 = svld1_bf16(pg16_all, ptr_b0);

MATMUL(0, 0);
MATMUL(1, 0);

ptr_a0 += 32;
ptr_b0 += 16;
}

ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);
ACCUMULATE_LAST4_TO_FIRST4(1, 0, tmp);

ZIP_EVEN_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc0);
ZIP_ODD_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc2);

UPDATE_C(pg32_first_4, ptr_c0, oc0, vc0);
UPDATE_C(pg32_first_4, ptr_c1, oc2, vc2);

ptr_c0 += 4;
ptr_c1 += 4;
}

if (m & 2) {
ptr_a0 = ptr_a;
ptr_a += 2 * pad_k;
ptr_b0 = ptr_b;

INIT_C(0, 0);

for (BLASLONG p = 0; p < pad_k; p += 8) {
ma0 = svld1_bf16(pg16_all, ptr_a0);
mb0 = svld1_bf16(pg16_all, ptr_b0);

MATMUL(0, 0);

ptr_a0 += 16;
ptr_b0 += 16;
}

ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);
vc0 = svuzp1(mc00, mc00);
vc1 = svuzp2(mc00, mc00);

UPDATE_C(pg32_first_2, ptr_c0, oc0, vc0);
UPDATE_C(pg32_first_2, ptr_c1, oc1, vc1);

ptr_c0 += 2;
ptr_c1 += 2;
}

if (m & 1) {
ptr_a0 = ptr_a;
ptr_b0 = ptr_b;
INIT_C(0, 0);
for (BLASLONG p = 0; p < pad_k; p += 8) {
ma0 = svld1_bf16(pg16_all, ptr_a0);
mb0 = svld1_bf16(pg16_all, ptr_b0);
MATMUL(0, 0);
ptr_a0 += 16;
ptr_b0 += 16;
}

ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);
vc1 = svuzp2(mc00, mc00);

UPDATE_C(pg32_first_1, ptr_c0, oc0, mc00);
UPDATE_C(pg32_first_1, ptr_c1, oc1, vc1);
}

ptr_b += 2 * pad_k;
}

if (n & 1) { // TODO: this case seems to be pure overhead; find out whether it
             // actually occurs in our use case.
ptr_c0 = ptr_c;
ptr_a = (bfloat16_t *)A;

for (BLASLONG i = 0; i < m / 4; i++) {
ptr_a0 = ptr_a;
ptr_a += 4 * pad_k;

ptr_b0 = ptr_b;

INIT_C(0, 0);
INIT_C(1, 0);

for (BLASLONG p = 0; p < pad_k; p += 8) {
ma0 = svld1_bf16(pg16_all, ptr_a0);
ma1 = svld1_bf16(pg16_all, ptr_a0 + 16);

mb0 = svld1_bf16(pg16_all, ptr_b0);

MATMUL(0, 0);
MATMUL(1, 0);

ptr_a0 += 32;
ptr_b0 += 16;
}

ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);
ACCUMULATE_LAST4_TO_FIRST4(1, 0, tmp);

ZIP_EVEN_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc0);

UPDATE_C(pg32_first_4, ptr_c0, oc0, vc0);

ptr_c0 += 4;
}

if (m & 2) {
ptr_a0 = ptr_a;
ptr_a += 2 * pad_k;
ptr_b0 = ptr_b;

INIT_C(0, 0);

for (BLASLONG p = 0; p < pad_k; p += 8) {
ma0 = svld1_bf16(pg16_all, ptr_a0);
mb0 = svld1_bf16(pg16_all, ptr_b0);

MATMUL(0, 0);

ptr_a0 += 16;
ptr_b0 += 16;
}

ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);

vc0 = svuzp1(mc00, mc00);

UPDATE_C(pg32_first_2, ptr_c0, oc0, vc0);

ptr_c0 += 2;
}

if (m & 1) {
ptr_a0 = ptr_a;
ptr_b0 = ptr_b;

INIT_C(0, 0);
for (BLASLONG p = 0; p < pad_k; p += 8) {

ma0 = svld1_bf16(pg16_all, ptr_a0);
mb0 = svld1_bf16(pg16_all, ptr_b0);

MATMUL(0, 0);
ptr_a0 += 16;
ptr_b0 += 16;
}

ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);

UPDATE_C(pg32_first_1, ptr_c0, oc0, mc00);
}
}

return 0;
}
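
A note on the instruction the macros above wrap: per the Arm ACLE documentation,
svbfmmla works on each 128-bit segment independently, multiplying a 2x4 bf16 tile from
the first operand by the transpose of a 2x4 bf16 tile from the second and accumulating
a 2x2 fp32 tile. On a 256-bit vector there are two such segments, and the packed panels
feed them k-slices 0..3 and 4..7 of the same output block, which is why
ACCUMULATE_LAST4_TO_FIRST4 folds the upper four fp32 lanes onto the lower four with
svext/svadd before the results are unzipped into rows of C. A scalar sketch of one
segment (reusing the bf16_to_f32 helper from the earlier sketch; not the committed
code):

    /* one 128-bit segment: c (2x2, row-major) += a (2x4, row-major) * b (2x4, row-major)^T */
    static void bfmmla_segment_ref(float c[4], const unsigned short a[8],
                                   const unsigned short b[8]) {
      for (int i = 0; i < 2; i++)
        for (int j = 0; j < 2; j++) {
          float acc = c[2 * i + j];
          for (int p = 0; p < 4; p++)
            acc += bf16_to_f32(a[4 * i + p]) * bf16_to_f32(b[4 * j + p]);
          c[2 * i + j] = acc;
        }
    }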

kernel/arm64/sbgemm_kernel_8x4_neoversev1_impl.c  +0 -472

@@ -1,472 +0,0 @@
/***************************************************************************
* Copyright (c) 2024, The OpenBLAS Project
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name of the OpenBLAS project nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* *****************************************************************************/

#include <arm_sve.h>

#include "common.h"

#define INIT_C(M, N) mc##M##N = svdup_f32(0);

#define MATMUL(M, N) mc##M##N = svbfmmla(mc##M##N, ma##M, mb##N);

#define INIT_C_8x4 \
do { \
INIT_C(0, 0); \
INIT_C(0, 1); \
INIT_C(1, 0); \
INIT_C(1, 1); \
INIT_C(2, 0); \
INIT_C(2, 1); \
INIT_C(3, 0); \
INIT_C(3, 1); \
} while (0);

#ifdef ALPHA_ONE
#define UPDATE_C(PG, PTR, DST, SRC) \
do { \
DST = svld1_f32((PG), (PTR)); \
DST = svadd_z((PG), SRC, DST); \
svst1_f32((PG), (PTR), DST); \
} while (0);
#else
#define UPDATE_C(PG, PTR, DST, SRC) \
do { \
DST = svld1_f32((PG), (PTR)); \
DST = svmad_z((PG), svalpha, SRC, DST); \
svst1_f32((PG), (PTR), DST); \
} while (0);
#endif

#ifdef ALPHA_ONE
int sbgemm_kernel_neoversev1_alpha_one(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, IFLOAT * B, FLOAT * C, BLASLONG ldc)
#else
int sbgemm_kernel_neoversev1_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, IFLOAT * B, FLOAT * C, BLASLONG ldc)
#endif
{
BLASLONG pad_k = (k + 3) & ~3;

svbfloat16_t ma0, ma1, ma2, ma3, mb0, mb1;
svfloat32_t mc00, mc01, mc10, mc11, mc20, mc21, mc30, mc31,
vc0, vc1, vc2, vc3, vc4, vc5, vc6, vc7,
oc0, oc1, oc2, oc3, oc4, oc5, oc6, oc7;
svfloat32_t svalpha = svdup_f32(alpha);

svbool_t pg16 = svptrue_b16();
svbool_t pg16_low = svdupq_b16(1, 1, 1, 1, 0, 0, 0, 0);
svbool_t pg32 = svptrue_b32();
svbool_t pg32_low = svdupq_b32(1, 1, 0, 0);
svbool_t pg32_first = svdupq_b32(1, 0, 0, 0);

bfloat16_t *ptr_a = (bfloat16_t *)A;
bfloat16_t *ptr_b = (bfloat16_t *)B;
FLOAT *ptr_c = C;

bfloat16_t *ptr_a0, *ptr_a1, *ptr_a2, *ptr_a3;
bfloat16_t *ptr_b0, *ptr_b1;
FLOAT *ptr_c0, *ptr_c1, *ptr_c2, *ptr_c3;

for (BLASLONG j = 0; j < n / 4; j++) {
ptr_c0 = ptr_c;
ptr_c1 = ptr_c0 + ldc;
ptr_c2 = ptr_c1 + ldc;
ptr_c3 = ptr_c2 + ldc;
ptr_c += 4 * ldc;
ptr_a = (bfloat16_t *)A;

for (BLASLONG i = 0; i < m / 8; i++) {
ptr_a0 = ptr_a;
ptr_a += 8 * pad_k;

ptr_b0 = ptr_b;

INIT_C_8x4;

for (BLASLONG p = 0; p < pad_k; p += 4) {
ma0 = svld1_bf16(pg16, ptr_a0);
ma1 = svld1_bf16(pg16, ptr_a0 + 8);
ma2 = svld1_bf16(pg16, ptr_a0 + 16);
ma3 = svld1_bf16(pg16, ptr_a0 + 24);

mb0 = svld1_bf16(pg16, ptr_b0);
mb1 = svld1_bf16(pg16, ptr_b0 + 8);

MATMUL(0, 0); MATMUL(0, 1);
MATMUL(1, 0); MATMUL(1, 1);
MATMUL(2, 0); MATMUL(2, 1);
MATMUL(3, 0); MATMUL(3, 1);

ptr_a0 += 32;
ptr_b0 += 16;
}

vc0 = svuzp1(mc00, mc10);
vc1 = svuzp1(mc20, mc30);
vc2 = svuzp2(mc00, mc10);
vc3 = svuzp2(mc20, mc30);
vc4 = svuzp1(mc01, mc11);
vc5 = svuzp1(mc21, mc31);
vc6 = svuzp2(mc01, mc11);
vc7 = svuzp2(mc21, mc31);

UPDATE_C(pg32, ptr_c0, oc0, vc0);
UPDATE_C(pg32, ptr_c0+4, oc1, vc1);
UPDATE_C(pg32, ptr_c1, oc2, vc2);
UPDATE_C(pg32, ptr_c1+4, oc3, vc3);
UPDATE_C(pg32, ptr_c2, oc4, vc4)
UPDATE_C(pg32, ptr_c2+4, oc5, vc5);
UPDATE_C(pg32, ptr_c3, oc6, vc6)
UPDATE_C(pg32, ptr_c3+4, oc7, vc7);

ptr_c0 += 8;
ptr_c1 += 8;
ptr_c2 += 8;
ptr_c3 += 8;
}

if (m & 4) {
ptr_a0 = ptr_a;
ptr_a += 4 * pad_k;
ptr_b0 = ptr_b;

INIT_C(0, 0); INIT_C(0, 1);
INIT_C(1, 0); INIT_C(1, 1);

for (BLASLONG p = 0; p < pad_k; p += 4) {
ma0 = svld1_bf16(pg16, ptr_a0);
ma1 = svld1_bf16(pg16, ptr_a0 + 8);
mb0 = svld1_bf16(pg16, ptr_b0);
mb1 = svld1_bf16(pg16, ptr_b0 + 8);

MATMUL(0, 0); MATMUL(0, 1);
MATMUL(1, 0); MATMUL(1, 1);

ptr_a0 += 16;
ptr_b0 += 16;
}

vc0 = svuzp1(mc00, mc10);
vc1 = svuzp2(mc00, mc10);
vc2 = svuzp1(mc01, mc11);
vc3 = svuzp2(mc01, mc11);

UPDATE_C(pg32, ptr_c0, oc0, vc0);
UPDATE_C(pg32, ptr_c1, oc1, vc1);
UPDATE_C(pg32, ptr_c2, oc2, vc2);
UPDATE_C(pg32, ptr_c3, oc3, vc3);

ptr_c0 += 4;
ptr_c1 += 4;
ptr_c2 += 4;
ptr_c3 += 4;
}

if (m & 2) {
ptr_a0 = ptr_a;
ptr_a += 2 * pad_k;
ptr_b0 = ptr_b;

INIT_C(0, 0); INIT_C(0, 1);
for (BLASLONG p = 0; p < pad_k; p += 4) {
ma0 = svld1_bf16(pg16, ptr_a0);
mb0 = svld1_bf16(pg16, ptr_b0);
mb1 = svld1_bf16(pg16, ptr_b0 + 8);

MATMUL(0, 0); MATMUL(0, 1);

ptr_a0 += 8;
ptr_b0 += 16;
}

vc0 = svuzp1(mc00, mc00);
vc1 = svuzp2(mc00, mc00);
vc2 = svuzp1(mc01, mc01);
vc3 = svuzp2(mc01, mc01);

UPDATE_C(pg32_low, ptr_c0, oc0, vc0);
UPDATE_C(pg32_low, ptr_c1, oc1, vc1);
UPDATE_C(pg32_low, ptr_c2, oc2, vc2);
UPDATE_C(pg32_low, ptr_c3, oc3, vc3);

ptr_c0 += 2;
ptr_c1 += 2;
ptr_c2 += 2;
ptr_c3 += 2;
}

if (m & 1) {
ptr_a0 = ptr_a;
ptr_b0 = ptr_b;

INIT_C(0, 0); INIT_C(0, 1);
for (BLASLONG p = 0; p < pad_k; p += 4) {
ma0 = svld1_bf16(pg16_low, ptr_a0);
mb0 = svld1_bf16(pg16, ptr_b0);
mb1 = svld1_bf16(pg16, ptr_b0 + 8);

MATMUL(0, 0); MATMUL(0, 1);

ptr_a0 += 4;
ptr_b0 += 16;
}

vc1 = svuzp2(mc00, mc00);
vc3 = svuzp2(mc01, mc01);

UPDATE_C(pg32_first, ptr_c0, oc0, mc00);
UPDATE_C(pg32_first, ptr_c1, oc1, vc1);
UPDATE_C(pg32_first, ptr_c2, oc2, mc01);
UPDATE_C(pg32_first, ptr_c3, oc3, vc3);

}

ptr_b += 4 * pad_k;
}

if (n & 2) {
ptr_c0 = ptr_c;
ptr_c1 = ptr_c0 + ldc;
ptr_c += 2 * ldc;
ptr_a = (bfloat16_t *)A;

for (BLASLONG i = 0; i < m / 8; i++) {
ptr_a0 = ptr_a;
ptr_a += 8 * pad_k;

ptr_b0 = ptr_b;

INIT_C(0, 0);
INIT_C(1, 0);
INIT_C(2, 0);
INIT_C(3, 0);

for (BLASLONG p = 0; p < pad_k; p += 4) {
ma0 = svld1_bf16(pg16, ptr_a0);
ma1 = svld1_bf16(pg16, ptr_a0 + 8);
ma2 = svld1_bf16(pg16, ptr_a0 + 16);
ma3 = svld1_bf16(pg16, ptr_a0 + 24);

mb0 = svld1_bf16(pg16, ptr_b0);

MATMUL(0, 0);
MATMUL(1, 0);
MATMUL(2, 0);
MATMUL(3, 0);

ptr_a0 += 32;
ptr_b0 += 8;
}

vc0 = svuzp1(mc00, mc10);
vc1 = svuzp1(mc20, mc30);
vc2 = svuzp2(mc00, mc10);
vc3 = svuzp2(mc20, mc30);

UPDATE_C(pg32, ptr_c0, oc0, vc0);
UPDATE_C(pg32, ptr_c0 + 4, oc1, vc1);
UPDATE_C(pg32, ptr_c1, oc2, vc2);
UPDATE_C(pg32, ptr_c1 + 4, oc3, vc3);

ptr_c0 += 8;
ptr_c1 += 8;
}

if (m & 4) {
ptr_a0 = ptr_a;
ptr_a += 4 * pad_k;
ptr_b0 = ptr_b;

INIT_C(0, 0);
INIT_C(1, 0);

for (BLASLONG p = 0; p < pad_k; p += 4) {
ma0 = svld1_bf16(pg16, ptr_a0);
ma1 = svld1_bf16(pg16, ptr_a0 + 8);
mb0 = svld1_bf16(pg16, ptr_b0);
MATMUL(0, 0);
MATMUL(1, 0);
ptr_a0 += 16;
ptr_b0 += 8;
}

vc0 = svuzp1(mc00, mc10);
vc1 = svuzp2(mc00, mc10);

UPDATE_C(pg32, ptr_c0, oc0, vc0);
UPDATE_C(pg32, ptr_c1, oc1, vc1);

ptr_c0 += 4;
ptr_c1 += 4;
}

if (m & 2) {
ptr_a0 = ptr_a;
ptr_a += 2 * pad_k;
ptr_b0 = ptr_b;

INIT_C(0, 0);

for (BLASLONG p = 0; p < pad_k; p += 4) {
ma0 = svld1_bf16(pg16, ptr_a0);
mb0 = svld1_bf16(pg16, ptr_b0);

MATMUL(0, 0);

ptr_a0 += 8;
ptr_b0 += 8;
}

vc0 = svuzp1(mc00, mc00);
vc1 = svuzp2(mc00, mc00);
UPDATE_C(pg32_low, ptr_c0, oc0, vc0);
UPDATE_C(pg32_low, ptr_c1, oc1, vc1);

ptr_c0 += 2;
ptr_c1 += 2;

}

if (m & 1) {
ptr_a0 = ptr_a;
ptr_b0 = ptr_b;
INIT_C(0, 0);
for (BLASLONG p = 0; p < pad_k; p += 4) {
ma0 = svld1_bf16(pg16_low, ptr_a0);
mb0 = svld1_bf16(pg16, ptr_b0);
MATMUL(0, 0);
ptr_a0 += 4;
ptr_b0 += 8;
}
vc1 = svuzp2(mc00, mc00);

UPDATE_C(pg32_first, ptr_c0, oc0, mc00);
UPDATE_C(pg32_first, ptr_c1, oc1, vc1);
}

ptr_b += 2 * pad_k;
}

if (n & 1) {
ptr_c0 = ptr_c;
ptr_a = (bfloat16_t *)A;

for (BLASLONG i = 0; i < m / 8; i++) {
ptr_a0 = ptr_a;
ptr_a += 8 * pad_k;

ptr_b0 = ptr_b;

INIT_C(0, 0);
INIT_C(1, 0);
INIT_C(2, 0);
INIT_C(3, 0);

for (BLASLONG p = 0; p < pad_k; p += 4) {
ma0 = svld1_bf16(pg16, ptr_a0);
ma1 = svld1_bf16(pg16, ptr_a0 + 8);
ma2 = svld1_bf16(pg16, ptr_a0 + 16);
ma3 = svld1_bf16(pg16, ptr_a0 + 24);

mb0 = svld1_bf16(pg16_low, ptr_b0);

MATMUL(0, 0);
MATMUL(1, 0);
MATMUL(2, 0);
MATMUL(3, 0);

ptr_a0 += 32;
ptr_b0 += 4;
}

vc0 = svuzp1(mc00, mc10);
vc1 = svuzp1(mc20, mc30);

UPDATE_C(pg32, ptr_c0, oc0, vc0);
UPDATE_C(pg32, ptr_c0 + 4, oc1, vc1);

ptr_c0 += 8;
}

if (m & 4) {
ptr_a0 = ptr_a;
ptr_a += 4 * pad_k;
ptr_b0 = ptr_b;
INIT_C(0, 0);
INIT_C(1, 0);
for (BLASLONG p = 0; p < pad_k; p += 4) {
ma0 = svld1_bf16(pg16, ptr_a0);
ma1 = svld1_bf16(pg16, ptr_a0 + 8);
mb0 = svld1_bf16(pg16_low, ptr_b0);
MATMUL(0, 0);
MATMUL(1, 0);
ptr_a0 += 16;
ptr_b0 += 4;
}
vc0 = svuzp1(mc00, mc10);
UPDATE_C(pg32, ptr_c0, oc0, vc0);
ptr_c0 += 4;
}

if (m & 2) {
ptr_a0 = ptr_a;
ptr_a += 2 * pad_k;
ptr_b0 = ptr_b;

INIT_C(0, 0);

for (BLASLONG p = 0; p < pad_k; p += 4) {
ma0 = svld1_bf16(pg16, ptr_a0);
mb0 = svld1_bf16(pg16_low, ptr_b0);

MATMUL(0, 0);

ptr_a0 += 8;
ptr_b0 += 4;
}
vc0 = svuzp1(mc00, mc00);
UPDATE_C(pg32_low, ptr_c0, oc0, vc0);
ptr_c0 += 2;
}

if (m & 1) {
ptr_a0 = ptr_a;
ptr_b0 = ptr_b;
INIT_C(0, 0);
for (BLASLONG p = 0; p < pad_k; p += 4) {
ma0 = svld1_bf16(pg16_low, ptr_a0);
mb0 = svld1_bf16(pg16_low, ptr_b0);
MATMUL(0, 0);
ptr_a0 += 4;
ptr_b0 += 4;
}
UPDATE_C(pg32_first, ptr_c0, oc0, mc00);
}
}

return 0;
}


kernel/arm64/sbgemm_ncopy_4_neoversev1.c  +75 -54

@@ -1,5 +1,5 @@
/***************************************************************************
* Copyright (c) 2024, The OpenBLAS Project
* Copyright (c) 2024-2025, The OpenBLAS Project
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -37,8 +37,17 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) {
a_offset = a;
b_offset = b;

svbool_t pg16 = svdupq_b16(1, 1, 1, 1, 0, 0, 0, 0);
bfloat16_t zero_value_bf16;
*((uint16_t *)(&zero_value_bf16)) = 0;

svbool_t pg16_all = svptrue_b16(); // all 16 bf16 lanes on an SVE-256 machine
svbool_t pg16_first_8 = svwhilelt_b16(0, 8);

svbfloat16_t v0, v1, v2, v3;
svuint64_t t0, t1;

BLASLONG rest = m & 7;
svbool_t pg16_rest = svwhilelt_b16_s32(0, rest);

for (BLASLONG j = 0; j < n / 4; j++) {
a_offsetx[0] = a_offset;
@@ -47,33 +56,41 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) {
a_offsetx[3] = a_offsetx[2] + lda;
a_offset += 4 * lda;

for (BLASLONG i = 0; i < m / 4; i++) {
v0 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[0]);
v1 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[1]);
v2 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[2]);
v3 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[3]);
for (BLASLONG i = 0; i < m / 8; i++) {
v0 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[0]);
v1 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[1]);
v2 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[2]);
v3 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[3]);

svst1_bf16(pg16, (bfloat16_t *)b_offset, v0);
svst1_bf16(pg16, (bfloat16_t *)b_offset + 4, v1);
svst1_bf16(pg16, (bfloat16_t *)b_offset + 8, v2);
svst1_bf16(pg16, (bfloat16_t *)b_offset + 12, v3);
t0 = svzip1_u64(svreinterpret_u64_bf16(v0), svreinterpret_u64_bf16(v1));
t1 = svzip1_u64(svreinterpret_u64_bf16(v2), svreinterpret_u64_bf16(v3));

b_offset += 16;
a_offsetx[0] += 4;
a_offsetx[1] += 4;
a_offsetx[2] += 4;
a_offsetx[3] += 4;
svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0));
svst1_bf16(pg16_all, (bfloat16_t *)b_offset + 16,
svreinterpret_bf16_u64(t1));

a_offsetx[0] += 8;
a_offsetx[1] += 8;
a_offsetx[2] += 8;
a_offsetx[3] += 8;

b_offset += 32;
}

if (m & 3) {
BLASLONG rest = m & 3;
for (BLASLONG col = 0; col < 4; col++) {
b_offset[4 * col] = a_offsetx[col][0];
b_offset[4 * col + 1] = rest == 1 ? 0 : a_offsetx[col][1];
b_offset[4 * col + 2] = rest <= 2 ? 0 : a_offsetx[col][2];
b_offset[4 * col + 3] = rest <= 3 ? 0 : a_offsetx[col][3];
}
b_offset += 16;
if (rest) { // remainder along k dim
v0 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[0]);
v1 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[1]);
v2 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[2]);
v3 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[3]);

t0 = svzip1_u64(svreinterpret_u64_bf16(v0), svreinterpret_u64_bf16(v1));
t1 = svzip1_u64(svreinterpret_u64_bf16(v2), svreinterpret_u64_bf16(v3));

svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0));
svst1_bf16(pg16_all, (bfloat16_t *)b_offset + 16,
svreinterpret_bf16_u64(t1));

b_offset += 32;
}
}

@@ -82,46 +99,50 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) {
a_offsetx[1] = a_offsetx[0] + lda;
a_offset += 2 * lda;

for (BLASLONG i = 0; i < m / 4; i++) {
v0 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[0]);
v1 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[1]);
svst1_bf16(pg16, (bfloat16_t *)b_offset, v0);
svst1_bf16(pg16, (bfloat16_t *)b_offset + 4, v1);
for (BLASLONG i = 0; i < m / 8; i++) {
v0 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[0]);
v1 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[1]);

t0 = svzip1_u64(svreinterpret_u64_bf16(v0), svreinterpret_u64_bf16(v1));
svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0));

b_offset += 8;
a_offsetx[0] += 4;
a_offsetx[1] += 4;
b_offset += 16;
a_offsetx[0] += 8;
a_offsetx[1] += 8;
}

if (m & 3) {
BLASLONG rest = m & 3;
for (BLASLONG col = 0; col < 2; col++) {
b_offset[4 * col] = a_offsetx[col][0];
b_offset[4 * col + 1] = rest == 1 ? 0 : a_offsetx[col][1];
b_offset[4 * col + 2] = rest <= 2 ? 0 : a_offsetx[col][2];
b_offset[4 * col + 3] = rest <= 3 ? 0 : a_offsetx[col][3];
}
b_offset += 8;
if (rest) { // remainder along k dim
v0 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[0]);
v1 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[1]);

t0 = svzip1_u64(svreinterpret_u64_bf16(v0), svreinterpret_u64_bf16(v1));
svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0));

b_offset += 16;
}
}

if (n & 1) {
a_offsetx[0] = a_offset;
for (BLASLONG i = 0; i < m / 4; i++) {
v0 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[0]);
svst1_bf16(pg16, (bfloat16_t *)b_offset, v0);
b_offset += 4;
a_offsetx[0] += 4;

for (BLASLONG i = 0; i < m / 8; i++) {
v0 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[0]);
v1 = svdup_bf16(zero_value_bf16);

t0 = svzip1_u64(svreinterpret_u64_bf16(v0), svreinterpret_u64_bf16(v1));
svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0));

b_offset += 16;
a_offsetx[0] += 8;
}
if (m & 3) {
BLASLONG rest = m & 3;
b_offset[0] = a_offsetx[0][0];
b_offset[1] = rest == 1 ? 0 : a_offsetx[0][1];
b_offset[2] = rest <= 2 ? 0 : a_offsetx[0][2];
b_offset[3] = rest <= 3 ? 0 : a_offsetx[0][3];
if (rest) { // remainder along k dim
v0 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[0]);
v1 = svdup_bf16(zero_value_bf16);
t0 = svzip1_u64(svreinterpret_u64_bf16(v0), svreinterpret_u64_bf16(v1));
svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0));
}
}

return 0;
}
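
The svzip1_u64 calls above interleave 64-bit chunks (four consecutive bf16, i.e. four k
values) from two adjacent columns, so each 128-bit segment of the stored vector is the
2x4 bf16 tile one svbfmmla segment consumes, with k-slices 0..3 and 4..7 of a column
pair landing in the two halves of the same 256-bit store. A scalar model of one
full-width packing step (illustrative helper, assuming SVE-256, i.e. 32 bf16 written
per four-column step):

    /* src[col][p]: 8 consecutive k values (p) from each of 4 adjacent columns;
       dst: 32 bf16 in the order the 4x4 micro-kernel loads them */
    static void pack_ncopy4_step_ref(const unsigned short src[4][8],
                                     unsigned short dst[32]) {
      int out = 0;
      for (int pair = 0; pair < 2; pair++)      /* column pairs (0,1) and (2,3) */
        for (int half = 0; half < 2; half++)    /* k-slice 0..3, then 4..7      */
          for (int col = 0; col < 2; col++)     /* the two columns of the pair  */
            for (int p = 0; p < 4; p++)
              dst[out++] = src[2 * pair + col][4 * half + p];
    }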


kernel/arm64/sbgemm_ncopy_8_neoversev1.c  +0 -180

@@ -1,180 +0,0 @@
/***************************************************************************
* Copyright (c) 2024, The OpenBLAS Project
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name of the OpenBLAS project nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* *****************************************************************************/

#include <arm_sve.h>

#include "common.h"

int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) {
IFLOAT *a_offset;
IFLOAT *a_offsetx[8];
IFLOAT *b_offset;
a_offset = a;
b_offset = b;

svbool_t pg16 = svdupq_b16(1, 1, 1, 1, 0, 0, 0, 0);
svbfloat16_t v0, v1, v2, v3, v4, v5, v6, v7;

for (BLASLONG j = 0; j < n / 8; j++) {
a_offsetx[0] = a_offset;
a_offsetx[1] = a_offsetx[0] + lda;
a_offsetx[2] = a_offsetx[1] + lda;
a_offsetx[3] = a_offsetx[2] + lda;
a_offsetx[4] = a_offsetx[3] + lda;
a_offsetx[5] = a_offsetx[4] + lda;
a_offsetx[6] = a_offsetx[5] + lda;
a_offsetx[7] = a_offsetx[6] + lda;
a_offset += 8 * lda;

for (BLASLONG i = 0; i < m / 4; i++) {
v0 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[0]);
v1 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[1]);
v2 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[2]);
v3 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[3]);
v4 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[4]);
v5 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[5]);
v6 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[6]);
v7 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[7]);

svst1_bf16(pg16, (bfloat16_t *)b_offset, v0);
svst1_bf16(pg16, (bfloat16_t *)b_offset + 4, v1);
svst1_bf16(pg16, (bfloat16_t *)b_offset + 8, v2);
svst1_bf16(pg16, (bfloat16_t *)b_offset + 12, v3);
svst1_bf16(pg16, (bfloat16_t *)b_offset + 16, v4);
svst1_bf16(pg16, (bfloat16_t *)b_offset + 20, v5);
svst1_bf16(pg16, (bfloat16_t *)b_offset + 24, v6);
svst1_bf16(pg16, (bfloat16_t *)b_offset + 28, v7);

b_offset += 32;
a_offsetx[0] += 4;
a_offsetx[1] += 4;
a_offsetx[2] += 4;
a_offsetx[3] += 4;
a_offsetx[4] += 4;
a_offsetx[5] += 4;
a_offsetx[6] += 4;
a_offsetx[7] += 4;
}

if (m & 3) {
BLASLONG rest = m & 3;
for (BLASLONG col = 0; col < 8; col++) {
b_offset[4 * col] = a_offsetx[col][0];
b_offset[4 * col + 1] = rest == 1 ? 0 : a_offsetx[col][1];
b_offset[4 * col + 2] = rest <= 2 ? 0 : a_offsetx[col][2];
b_offset[4 * col + 3] = rest <= 3 ? 0 : a_offsetx[col][3];
}
b_offset += 32;
}
}

if (n & 4) {
a_offsetx[0] = a_offset;
a_offsetx[1] = a_offsetx[0] + lda;
a_offsetx[2] = a_offsetx[1] + lda;
a_offsetx[3] = a_offsetx[2] + lda;
a_offset += 4 * lda;

for (BLASLONG i = 0; i < m / 4; i++) {
v0 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[0]);
v1 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[1]);
v2 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[2]);
v3 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[3]);

svst1_bf16(pg16, (bfloat16_t *)b_offset, v0);
svst1_bf16(pg16, (bfloat16_t *)b_offset + 4, v1);
svst1_bf16(pg16, (bfloat16_t *)b_offset + 8, v2);
svst1_bf16(pg16, (bfloat16_t *)b_offset + 12, v3);

b_offset += 16;
a_offsetx[0] += 4;
a_offsetx[1] += 4;
a_offsetx[2] += 4;
a_offsetx[3] += 4;
}

if (m & 3) {
BLASLONG rest = m & 3;
for (BLASLONG col = 0; col < 4; col++) {
b_offset[4 * col] = a_offsetx[col][0];
b_offset[4 * col + 1] = rest == 1 ? 0 : a_offsetx[col][1];
b_offset[4 * col + 2] = rest <= 2 ? 0 : a_offsetx[col][2];
b_offset[4 * col + 3] = rest <= 3 ? 0 : a_offsetx[col][3];
}
b_offset += 16;
}
}

if (n & 2) {
a_offsetx[0] = a_offset;
a_offsetx[1] = a_offsetx[0] + lda;
a_offset += 2 * lda;

for (BLASLONG i = 0; i < m / 4; i++) {
v0 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[0]);
v1 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[1]);
svst1_bf16(pg16, (bfloat16_t *)b_offset, v0);
svst1_bf16(pg16, (bfloat16_t *)b_offset + 4, v1);

b_offset += 8;
a_offsetx[0] += 4;
a_offsetx[1] += 4;
}

if (m & 3) {
BLASLONG rest = m & 3;
for (BLASLONG col = 0; col < 2; col++) {
b_offset[4 * col] = a_offsetx[col][0];
b_offset[4 * col + 1] = rest == 1 ? 0 : a_offsetx[col][1];
b_offset[4 * col + 2] = rest <= 2 ? 0 : a_offsetx[col][2];
b_offset[4 * col + 3] = rest <= 3 ? 0 : a_offsetx[col][3];
}
b_offset += 8;
}
}

if (n & 1) {
a_offsetx[0] = a_offset;
for (BLASLONG i = 0; i < m / 4; i++) {
v0 = svld1_bf16(pg16, (bfloat16_t *)a_offsetx[0]);
svst1_bf16(pg16, (bfloat16_t *)b_offset, v0);
b_offset += 4;
a_offsetx[0] += 4;
}
if (m & 3) {
BLASLONG rest = m & 3;
b_offset[0] = a_offsetx[0][0];
b_offset[1] = rest == 1 ? 0 : a_offsetx[0][1];
b_offset[2] = rest <= 2 ? 0 : a_offsetx[0][2];
b_offset[3] = rest <= 3 ? 0 : a_offsetx[0][3];
}
}

return 0;
}


kernel/arm64/sbgemm_tcopy_4_neoversev1.c  +280 -67

@@ -1,5 +1,5 @@
/***************************************************************************
* Copyright (c) 2024, The OpenBLAS Project
* Copyright (c) 2024-2025, The OpenBLAS Project
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -25,62 +25,214 @@
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* *****************************************************************************/
#include <arm_neon.h>

#include "common.h"
#include <arm_neon.h>
#include <arm_sve.h>

int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) {
IFLOAT *a_offset, *a_offset0, *a_offset1, *a_offset2, *a_offset3;
BLASLONG pad_m = ((m + 7) & ~7);
BLASLONG rest = (m & 7); // rest along m dim

IFLOAT *a_offset;
IFLOAT *a_offset0, *a_offset1, *a_offset2, *a_offset3;
IFLOAT *a_offset4, *a_offset5, *a_offset6, *a_offset7;

IFLOAT *b_offset;
IFLOAT *b_offset0, *b_offset1;

a_offset = a;
b_offset = b;

uint16x4_t v0_h, v1_h, v2_h, v3_h, v4_h, v5_h, v6_h, v7_h;
svuint16_t c0, c1, c2, c3, c4, c5, c6, c7;
svuint16_t t0, t1, t2, t3;
svuint32_t m00, m01, m10, m11;
svuint64_t st_offsets_0, st_offsets_1;

svbool_t pg16_first_4 = svwhilelt_b16(0, 4);
svbool_t pg16_first_8 = svwhilelt_b16(0, 8);

svbool_t pg64_first_4 = svwhilelt_b64(0, 4);
u_int32_t sizeof_u64 = 8;
u_int64_t _st_offsets_0[4] = {
0 * sizeof_u64,
1 * sizeof_u64,
4 * sizeof_u64,
5 * sizeof_u64,
};

u_int64_t _st_offsets_1[4] = {
2 * sizeof_u64,
3 * sizeof_u64,
6 * sizeof_u64,
7 * sizeof_u64,
};

st_offsets_0 = svld1_u64(pg64_first_4, _st_offsets_0);
st_offsets_1 = svld1_u64(pg64_first_4, _st_offsets_1);

for (BLASLONG j = 0; j < n / 8; j++) {
a_offset0 = a_offset;
a_offset1 = a_offset0 + lda;
a_offset2 = a_offset1 + lda;
a_offset3 = a_offset2 + lda;
a_offset4 = a_offset3 + lda;
a_offset5 = a_offset4 + lda;
a_offset6 = a_offset5 + lda;
a_offset7 = a_offset6 + lda;
a_offset += 8;

b_offset0 = b_offset;
b_offset1 = b_offset0 + 4 * pad_m;

b_offset += 8 * pad_m;
for (BLASLONG i = 0; i < m / 8; i++) {
// transpose the 8x8 block and pack it into two 4x8 panels, each made of
// 2x4 sub-blocks
c0 = svld1_u16(pg16_first_8, a_offset0);
c1 = svld1_u16(pg16_first_8, a_offset1);
c2 = svld1_u16(pg16_first_8, a_offset2);
c3 = svld1_u16(pg16_first_8, a_offset3);
c4 = svld1_u16(pg16_first_8, a_offset4);
c5 = svld1_u16(pg16_first_8, a_offset5);
c6 = svld1_u16(pg16_first_8, a_offset6);
c7 = svld1_u16(pg16_first_8, a_offset7);

t0 = svzip1_u16(c0, c1);
t1 = svzip1_u16(c2, c3);
t2 = svzip1_u16(c4, c5);
t3 = svzip1_u16(c6, c7);

m00 = svzip1_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1));
m10 = svzip2_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1));
m01 = svzip1_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3));
m11 = svzip2_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3));

svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset0,
st_offsets_0, svreinterpret_u64_u32(m00));
svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset0,
st_offsets_1, svreinterpret_u64_u32(m01));
svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset1,
st_offsets_0, svreinterpret_u64_u32(m10));
svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset1,
st_offsets_1, svreinterpret_u64_u32(m11));

a_offset0 += 8 * lda;
a_offset1 += 8 * lda;
a_offset2 += 8 * lda;
a_offset3 += 8 * lda;
a_offset4 += 8 * lda;
a_offset5 += 8 * lda;
a_offset6 += 8 * lda;
a_offset7 += 8 * lda;

b_offset0 += 32;
b_offset1 += 32;
}

if (rest) {
c0 = svld1_u16(pg16_first_8, a_offset0);
c1 = (rest >= 2 ? svld1_u16(pg16_first_8, a_offset1) : svdup_u16(0));
c2 = (rest >= 3 ? svld1_u16(pg16_first_8, a_offset2) : svdup_u16(0));
c3 = (rest >= 4 ? svld1_u16(pg16_first_8, a_offset3) : svdup_u16(0));
c4 = (rest >= 5 ? svld1_u16(pg16_first_8, a_offset4) : svdup_u16(0));
c5 = (rest >= 6 ? svld1_u16(pg16_first_8, a_offset5) : svdup_u16(0));
c6 = (rest == 7 ? svld1_u16(pg16_first_8, a_offset6) : svdup_u16(0));
c7 = (svdup_u16(0));

t0 = svzip1_u16(c0, c1);
t1 = svzip1_u16(c2, c3);
t2 = svzip1_u16(c4, c5);
t3 = svzip1_u16(c6, c7);

m00 = svzip1_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1));
m10 = svzip2_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1));
m01 = svzip1_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3));
m11 = svzip2_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3));

for (BLASLONG j = 0; j < n / 4; j++) {
svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset0,
st_offsets_0, svreinterpret_u64_u32(m00));
svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset0,
st_offsets_1, svreinterpret_u64_u32(m01));
svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset1,
st_offsets_0, svreinterpret_u64_u32(m10));
svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset1,
st_offsets_1, svreinterpret_u64_u32(m11));
}
}

if (n & 4) {
a_offset0 = a_offset;
a_offset1 = a_offset0 + lda;
a_offset2 = a_offset1 + lda;
a_offset3 = a_offset2 + lda;
a_offset4 = a_offset3 + lda;
a_offset5 = a_offset4 + lda;
a_offset6 = a_offset5 + lda;
a_offset7 = a_offset6 + lda;
a_offset += 4;

for (BLASLONG i = 0; i < m / 4; i++) {
v0_h = vld1_u16(a_offset0);
v1_h = vld1_u16(a_offset1);
v2_h = vld1_u16(a_offset2);
v3_h = vld1_u16(a_offset3);
b_offset0 = b_offset;
b_offset += 4 * pad_m;

v4_h = vtrn1_u16(v0_h, v1_h);
v5_h = vtrn2_u16(v0_h, v1_h);
v6_h = vtrn1_u16(v2_h, v3_h);
v7_h = vtrn2_u16(v2_h, v3_h);
for (BLASLONG i = 0; i < m / 8; i++) {
// transpose the 8x4 block and pack it into one 4x8 panel made of 2x4
// sub-blocks
c0 = svld1_u16(pg16_first_4, a_offset0);
c1 = svld1_u16(pg16_first_4, a_offset1);
c2 = svld1_u16(pg16_first_4, a_offset2);
c3 = svld1_u16(pg16_first_4, a_offset3);
c4 = svld1_u16(pg16_first_4, a_offset4);
c5 = svld1_u16(pg16_first_4, a_offset5);
c6 = svld1_u16(pg16_first_4, a_offset6);
c7 = svld1_u16(pg16_first_4, a_offset7);

v0_h = (uint16x4_t)vtrn1_u32((uint32x2_t)v4_h, (uint32x2_t)v6_h);
v1_h = (uint16x4_t)vtrn1_u32((uint32x2_t)v5_h, (uint32x2_t)v7_h);
v2_h = (uint16x4_t)vtrn2_u32((uint32x2_t)v4_h, (uint32x2_t)v6_h);
v3_h = (uint16x4_t)vtrn2_u32((uint32x2_t)v5_h, (uint32x2_t)v7_h);
t0 = svzip1_u16(c0, c1);
t1 = svzip1_u16(c2, c3);
t2 = svzip1_u16(c4, c5);
t3 = svzip1_u16(c6, c7);

vst1_u16(b_offset, v0_h);
vst1_u16(b_offset + 4, v1_h);
vst1_u16(b_offset + 8, v2_h);
vst1_u16(b_offset + 12, v3_h);
m00 = svzip1_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1));
m01 = svzip1_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3));
svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset0,
st_offsets_0, svreinterpret_u64_u32(m00));
svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset0,
st_offsets_1, svreinterpret_u64_u32(m01));

b_offset += 16;
a_offset0 += 4 * lda;
a_offset1 += 4 * lda;
a_offset2 += 4 * lda;
a_offset3 += 4 * lda;
a_offset0 += 8 * lda;
a_offset1 += 8 * lda;
a_offset2 += 8 * lda;
a_offset3 += 8 * lda;
a_offset4 += 8 * lda;
a_offset5 += 8 * lda;
a_offset6 += 8 * lda;
a_offset7 += 8 * lda;

b_offset0 += 32;
}

if (m & 3) {
BLASLONG rest = m & 3;
for (BLASLONG line = 0; line < 4; line++) {
b_offset[line * 4] = a_offset0[line];
b_offset[line * 4 + 1] = rest == 1 ? 0 : a_offset1[line];
b_offset[line * 4 + 2] = rest <= 2 ? 0 : a_offset2[line];
b_offset[line * 4 + 3] = rest <= 3 ? 0 : a_offset3[line];
}
b_offset += 16;
if (rest) {
c0 = svld1_u16(pg16_first_4, a_offset0); // rest >= 1
c1 = (rest >= 2 ? svld1_u16(pg16_first_4, a_offset1) : svdup_u16(0));
c2 = (rest >= 3 ? svld1_u16(pg16_first_4, a_offset2) : svdup_u16(0));
c3 = (rest >= 4 ? svld1_u16(pg16_first_4, a_offset3) : svdup_u16(0));
c4 = (rest >= 5 ? svld1_u16(pg16_first_4, a_offset4) : svdup_u16(0));
c5 = (rest >= 6 ? svld1_u16(pg16_first_4, a_offset5) : svdup_u16(0));
c6 = (rest == 7 ? svld1_u16(pg16_first_4, a_offset6) : svdup_u16(0));
c7 = (svdup_u16(0));

t0 = svzip1_u16(c0, c1);
t1 = svzip1_u16(c2, c3);
t2 = svzip1_u16(c4, c5);
t3 = svzip1_u16(c6, c7);

m00 = svzip1_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1));
m01 = svzip1_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3));

svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset0,
st_offsets_0, svreinterpret_u64_u32(m00));
svst1_scatter_u64offset_u64(pg64_first_4, (u_int64_t *)b_offset0,
st_offsets_1, svreinterpret_u64_u32(m01));
}
}

@@ -89,31 +241,54 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) {
a_offset1 = a_offset0 + lda;
a_offset2 = a_offset1 + lda;
a_offset3 = a_offset2 + lda;
a_offset4 = a_offset3 + lda;
a_offset5 = a_offset4 + lda;
a_offset6 = a_offset5 + lda;
a_offset7 = a_offset6 + lda;
a_offset += 2;

for (BLASLONG i = 0; i < m / 4; i++) {
b_offset0 = b_offset;
b_offset1 = b_offset0 + 8;

b_offset += 2 * pad_m;

for (BLASLONG i = 0; i < m / 8; i++) {
for (BLASLONG line = 0; line < 2; line++) {
b_offset[line * 4] = a_offset0[line];
b_offset[line * 4 + 1] = a_offset1[line];
b_offset[line * 4 + 2] = a_offset2[line];
b_offset[line * 4 + 3] = a_offset3[line];
b_offset0[line * 4] = a_offset0[line];
b_offset0[line * 4 + 1] = a_offset1[line];
b_offset0[line * 4 + 2] = a_offset2[line];
b_offset0[line * 4 + 3] = a_offset3[line];

b_offset1[line * 4] = a_offset4[line];
b_offset1[line * 4 + 1] = a_offset5[line];
b_offset1[line * 4 + 2] = a_offset6[line];
b_offset1[line * 4 + 3] = a_offset7[line];
}
b_offset += 8;
a_offset0 += 4 * lda;
a_offset1 += 4 * lda;
a_offset2 += 4 * lda;
a_offset3 += 4 * lda;
b_offset0 += 16;
b_offset1 += 16;

a_offset0 += 8 * lda;
a_offset1 += 8 * lda;
a_offset2 += 8 * lda;
a_offset3 += 8 * lda;
a_offset4 += 8 * lda;
a_offset5 += 8 * lda;
a_offset6 += 8 * lda;
a_offset7 += 8 * lda;
}

if (m & 3) {
BLASLONG rest = m & 3;
if (rest) {
for (BLASLONG line = 0; line < 2; line++) {
b_offset[line * 4] = a_offset0[line];
b_offset[line * 4 + 1] = rest == 1 ? 0 : a_offset1[line];
b_offset[line * 4 + 2] = rest <= 2 ? 0 : a_offset2[line];
b_offset[line * 4 + 3] = rest <= 3 ? 0 : a_offset3[line];
b_offset0[line * 4] = a_offset0[line];
b_offset0[line * 4 + 1] = rest == 1 ? 0 : a_offset1[line];
b_offset0[line * 4 + 2] = rest <= 2 ? 0 : a_offset2[line];
b_offset0[line * 4 + 3] = rest <= 3 ? 0 : a_offset3[line];

b_offset1[line * 4] = rest <= 4 ? 0 : a_offset4[line];
b_offset1[line * 4 + 1] = rest <= 5 ? 0 : a_offset5[line];
b_offset1[line * 4 + 2] = rest <= 6 ? 0 : a_offset6[line];
b_offset1[line * 4 + 3] = 0;
}
b_offset += 8;
}
}

@@ -122,27 +297,65 @@ int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) {
a_offset1 = a_offset0 + lda;
a_offset2 = a_offset1 + lda;
a_offset3 = a_offset2 + lda;
a_offset4 = a_offset3 + lda;
a_offset5 = a_offset4 + lda;
a_offset6 = a_offset5 + lda;
a_offset7 = a_offset6 + lda;

for (BLASLONG i = 0; i < m / 4; i++) {
b_offset[0] = *a_offset0;
b_offset[1] = *a_offset1;
b_offset[2] = *a_offset2;
b_offset[3] = *a_offset3;
b_offset += 4;
a_offset0 += 4 * lda;
a_offset1 += 4 * lda;
a_offset2 += 4 * lda;
a_offset3 += 4 * lda;
for (BLASLONG i = 0; i < m / 8; i++) {
b_offset[0] = a_offset0[0];
b_offset[1] = a_offset1[0];
b_offset[2] = a_offset2[0];
b_offset[3] = a_offset3[0];

b_offset[4] = 0;
b_offset[5] = 0;
b_offset[6] = 0;
b_offset[7] = 0;

b_offset[8] = a_offset4[0];
b_offset[9] = a_offset5[0];
b_offset[10] = a_offset6[0];
b_offset[11] = a_offset7[0];

b_offset[12] = 0;
b_offset[13] = 0;
b_offset[14] = 0;
b_offset[15] = 0;

b_offset += 16;
a_offset0 += 8 * lda;
a_offset1 += 8 * lda;
a_offset2 += 8 * lda;
a_offset3 += 8 * lda;
a_offset4 += 8 * lda;
a_offset5 += 8 * lda;
a_offset6 += 8 * lda;
a_offset7 += 8 * lda;
}

if (m & 3) {
BLASLONG rest = m & 3;
if (rest) {
b_offset[0] = *a_offset0;
b_offset[1] = rest == 1 ? 0 : *a_offset1;
b_offset[2] = rest <= 2 ? 0 : *a_offset2;
b_offset[3] = rest <= 3 ? 0 : *a_offset3;

b_offset[4] = 0;
b_offset[5] = 0;
b_offset[6] = 0;
b_offset[7] = 0;

b_offset[8] = rest <= 4 ? 0 : *a_offset4;
b_offset[9] = rest <= 5 ? 0 : *a_offset5;
b_offset[10] = rest <= 6 ? 0 : *a_offset6;
b_offset[11] = 0;

b_offset[12] = 0;
b_offset[13] = 0;
b_offset[14] = 0;
b_offset[15] = 0;
}
}

return 0;
}
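
The scatter offsets {0, 1, 4, 5} and {2, 3, 6, 7} (in 64-bit units) interleave the four
zipped vectors so that each 128-bit quadword of a packed panel receives one 2x4
sub-block of the transposed tile, in the order the micro-kernel's loads expect. A
scalar model of one 8x8 transpose-and-pack step (a sketch of the resulting layout, with
illustrative names; dst0/dst1 correspond to the panels written through
b_offset0/b_offset1 above):

    /* src[r][c]: element r of source pointer a_offset{c} (c selects the lda column);
       dst0 receives columns 0..3, dst1 receives columns 4..7 */
    static void pack_tcopy4_step_ref(const unsigned short src[8][8],
                                     unsigned short dst0[32],
                                     unsigned short dst1[32]) {
      for (int c = 0; c < 8; c++)
        for (int r = 0; r < 8; r++) {
          unsigned short *dst = (c < 4) ? dst0 : dst1; /* column half picks the panel */
          int cc = c & 3;                    /* column within the panel               */
          int quad = 2 * (cc / 2) + (r / 4); /* 128-bit quadword inside the panel     */
          dst[8 * quad + 4 * (cc & 1) + (r & 3)] = src[r][c];
        }
    }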


kernel/arm64/sbgemm_tcopy_8_neoversev1.c  +0 -200

@@ -1,200 +0,0 @@
/***************************************************************************
* Copyright (c) 2024, The OpenBLAS Project
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* 3. Neither the name of the OpenBLAS project nor the names of
* its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
* *****************************************************************************/
#include <arm_neon.h>

#include "common.h"

int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) {
IFLOAT *a_offset, *a_offset0, *a_offset1, *a_offset2, *a_offset3;
IFLOAT *b_offset;
a_offset = a;
b_offset = b;

uint16x8_t v0, v1, v2, v3, v4, v5, v6, v7;
uint16x4_t v0_h, v1_h, v2_h, v3_h, v4_h, v5_h, v6_h, v7_h;

for (BLASLONG j = 0; j < n / 8; j++) {
a_offset0 = a_offset;
a_offset1 = a_offset0 + lda;
a_offset2 = a_offset1 + lda;
a_offset3 = a_offset2 + lda;
a_offset += 8;

for (BLASLONG i = 0; i < m / 4; i++) {
v0 = vld1q_u16(a_offset0);
v1 = vld1q_u16(a_offset1);
v2 = vld1q_u16(a_offset2);
v3 = vld1q_u16(a_offset3);

v4 = vtrn1q_u16(v0, v1);
v5 = vtrn2q_u16(v0, v1);
v6 = vtrn1q_u16(v2, v3);
v7 = vtrn2q_u16(v2, v3);

v0 = (uint16x8_t)vtrn1q_u32((uint32x4_t)v4, (uint32x4_t)v6);
v1 = (uint16x8_t)vtrn1q_u32((uint32x4_t)v5, (uint32x4_t)v7);
v2 = (uint16x8_t)vtrn2q_u32((uint32x4_t)v4, (uint32x4_t)v6);
v3 = (uint16x8_t)vtrn2q_u32((uint32x4_t)v5, (uint32x4_t)v7);

vst1_u16(b_offset, vget_low_u16(v0));
vst1_u16(b_offset + 4, vget_low_u16(v1));
vst1_u16(b_offset + 8, vget_low_u16(v2));
vst1_u16(b_offset + 12, vget_low_u16(v3));
vst1_u16(b_offset + 16, vget_high_u16(v0));
vst1_u16(b_offset + 20, vget_high_u16(v1));
vst1_u16(b_offset + 24, vget_high_u16(v2));
vst1_u16(b_offset + 28, vget_high_u16(v3));

b_offset += 32;
a_offset0 += 4 * lda;
a_offset1 += 4 * lda;
a_offset2 += 4 * lda;
a_offset3 += 4 * lda;
}

if (m & 3) {
BLASLONG rest = m & 3;
for (BLASLONG line = 0; line < 8; line++) {
b_offset[line * 4] = a_offset0[line];
b_offset[line * 4 + 1] = rest == 1 ? 0 : a_offset1[line];
b_offset[line * 4 + 2] = rest <= 2 ? 0 : a_offset2[line];
b_offset[line * 4 + 3] = rest <= 3 ? 0 : a_offset3[line];
}
b_offset += 32;
}
}

if (n & 4) {
a_offset0 = a_offset;
a_offset1 = a_offset0 + lda;
a_offset2 = a_offset1 + lda;
a_offset3 = a_offset2 + lda;
a_offset += 4;

for (BLASLONG i = 0; i < m / 4; i++) {
v0_h = vld1_u16(a_offset0);
v1_h = vld1_u16(a_offset1);
v2_h = vld1_u16(a_offset2);
v3_h = vld1_u16(a_offset3);

v4_h = vtrn1_u16(v0_h, v1_h);
v5_h = vtrn2_u16(v0_h, v1_h);
v6_h = vtrn1_u16(v2_h, v3_h);
v7_h = vtrn2_u16(v2_h, v3_h);

v0_h = (uint16x4_t)vtrn1_u32((uint32x2_t)v4_h, (uint32x2_t)v6_h);
v1_h = (uint16x4_t)vtrn1_u32((uint32x2_t)v5_h, (uint32x2_t)v7_h);
v2_h = (uint16x4_t)vtrn2_u32((uint32x2_t)v4_h, (uint32x2_t)v6_h);
v3_h = (uint16x4_t)vtrn2_u32((uint32x2_t)v5_h, (uint32x2_t)v7_h);

vst1_u16(b_offset, v0_h);
vst1_u16(b_offset + 4, v1_h);
vst1_u16(b_offset + 8, v2_h);
vst1_u16(b_offset + 12, v3_h);

b_offset += 16;
a_offset0 += 4 * lda;
a_offset1 += 4 * lda;
a_offset2 += 4 * lda;
a_offset3 += 4 * lda;
}

if (m & 3) {
BLASLONG rest = m & 3;
for (BLASLONG line = 0; line < 4; line++) {
b_offset[line * 4] = a_offset0[line];
b_offset[line * 4 + 1] = rest == 1 ? 0 : a_offset1[line];
b_offset[line * 4 + 2] = rest <= 2 ? 0 : a_offset2[line];
b_offset[line * 4 + 3] = rest <= 3 ? 0 : a_offset3[line];
}
b_offset += 16;
}
}

if (n & 2) {
a_offset0 = a_offset;
a_offset1 = a_offset0 + lda;
a_offset2 = a_offset1 + lda;
a_offset3 = a_offset2 + lda;
a_offset += 2;

for (BLASLONG i = 0; i < m / 4; i++) {
for (BLASLONG line = 0; line < 2; line++) {
b_offset[line * 4] = a_offset0[line];
b_offset[line * 4 + 1] = a_offset1[line];
b_offset[line * 4 + 2] = a_offset2[line];
b_offset[line * 4 + 3] = a_offset3[line];
}
b_offset += 8;
a_offset0 += 4 * lda;
a_offset1 += 4 * lda;
a_offset2 += 4 * lda;
a_offset3 += 4 * lda;
}

if (m & 3) {
BLASLONG rest = m & 3;
for (BLASLONG line = 0; line < 2; line++) {
b_offset[line * 4] = a_offset0[line];
b_offset[line * 4 + 1] = rest == 1 ? 0 : a_offset1[line];
b_offset[line * 4 + 2] = rest <= 2 ? 0 : a_offset2[line];
b_offset[line * 4 + 3] = rest <= 3 ? 0 : a_offset3[line];
}
b_offset += 8;
}
}

if (n & 1) {
a_offset0 = a_offset;
a_offset1 = a_offset0 + lda;
a_offset2 = a_offset1 + lda;
a_offset3 = a_offset2 + lda;

for (BLASLONG i = 0; i < m / 4; i++) {
b_offset[0] = *a_offset0;
b_offset[1] = *a_offset1;
b_offset[2] = *a_offset2;
b_offset[3] = *a_offset3;
b_offset += 4;
a_offset0 += 4 * lda;
a_offset1 += 4 * lda;
a_offset2 += 4 * lda;
a_offset3 += 4 * lda;
}

if (m & 3) {
BLASLONG rest = m & 3;
b_offset[0] = *a_offset0;
b_offset[1] = rest == 1 ? 0 : *a_offset1;
b_offset[2] = rest <= 2 ? 0 : *a_offset2;
b_offset[3] = rest <= 3 ? 0 : *a_offset3;
}
}
return 0;
}


param.h  +3 -3

@@ -3553,12 +3553,12 @@ is a big desktop or server with abundant cache rather than a phone or embedded d
#define SWITCH_RATIO 16
#define GEMM_PREFERED_SIZE 8
#endif
#undef SBGEMM_ALIGN_K
#define SBGEMM_ALIGN_K 4

#undef SBGEMM_ALIGN_K
#undef SBGEMM_DEFAULT_UNROLL_M
#undef SBGEMM_DEFAULT_UNROLL_N
#define SBGEMM_DEFAULT_UNROLL_M 8
#define SBGEMM_ALIGN_K 8
#define SBGEMM_DEFAULT_UNROLL_M 4
#define SBGEMM_DEFAULT_UNROLL_N 4

#define SGEMM_DEFAULT_UNROLL_M 16
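
For reference, SBGEMM_ALIGN_K = 8 here lines up with pad_k = (k + 7) & ~7 in the new
4x4 kernel: the packed panels appear to be sized and zero-padded so that every inner
loop iteration consumes a full pair of 4-wide bfmmla k-slices (e.g. k = 10 is padded to
pad_k = 16, with the copy routines zero-filling the extra positions).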

