
Add optimized BGEMM for NEOVERSEN2 target

This re-uses the existing NEOVERSEN2 8x4 `sbgemm` kernel to implement `bgemm`.
Commit ea2faf0c9a by Chris Sidebottom, 2 months ago (branch: pull/5399/head)
7 changed files with 189 additions and 118 deletions
1. kernel/arm64/KERNEL.NEOVERSEN2 (+16, -1)
2. kernel/arm64/KERNEL.NEOVERSEV2 (+1, -6)
3. kernel/arm64/sbgemm_kernel_8x4_neoversen2.c (+14, -4)
4. kernel/arm64/sbgemm_kernel_8x4_neoversen2_impl.c (+144, -99)
5. param.h (+8, -2)
6. test/compare_sgemm_bgemm.c (+4, -4)
7. test/compare_sgemv_bgemv.c (+2, -2)
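
The re-use below hinges on an include-twice specialization pattern: the shared implementation file is compiled once per alpha case, and preprocessor defines pick the function names and the C-matrix storage type (`ALPHA_ONE` selects the alpha == 1 fast path; `BGEMM` switches C between fp32 and bf16). A minimal sketch of the pattern, with illustrative names rather than the exact OpenBLAS ones:

```c
#include <stdio.h>

/* One body, specialized twice by a macro parameter, mirroring how
 * sbgemm_kernel_8x4_neoversen2_impl.c is included once with ALPHA_ONE
 * defined and once without. Names are illustrative only. */
#define DEFINE_UPDATE(NAME, EXPR)                      \
  static float NAME(float c, float acc, float alpha) { \
    (void)alpha;                                       \
    return c + (EXPR);                                 \
  }

DEFINE_UPDATE(update_alpha_one, acc)      /* alpha == 1: skip the multiply */
DEFINE_UPDATE(update_alpha, alpha * acc)  /* general alpha */

int main(void) {
  printf("%.1f %.1f\n", update_alpha_one(1.0f, 2.0f, 0.5f),
         update_alpha(1.0f, 2.0f, 0.5f)); /* prints: 3.0 2.0 */
  return 0;
}
```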

kernel/arm64/KERNEL.NEOVERSEN2 (+16, -1)

@@ -188,6 +188,20 @@ ZGEMMOTCOPY = ../generic/zgemm_tcopy_$(ZGEMM_UNROLL_N).c
ZGEMMONCOPYOBJ = zgemm_oncopy$(TSUFFIX).$(SUFFIX)
ZGEMMOTCOPYOBJ = zgemm_otcopy$(TSUFFIX).$(SUFFIX)

+ifeq ($(BUILD_BFLOAT16), 1)
+BGEMM_BETA = sbgemm_beta_neoversen2.c
+BGEMMKERNEL = sbgemm_kernel_$(BGEMM_UNROLL_M)x$(BGEMM_UNROLL_N)_neoversen2.c
+BGEMMINCOPY = sbgemm_ncopy_$(BGEMM_UNROLL_M)_neoversen2.c
+BGEMMITCOPY = sbgemm_tcopy_$(BGEMM_UNROLL_M)_neoversen2.c
+BGEMMONCOPY = sbgemm_ncopy_$(BGEMM_UNROLL_N)_neoversen2.c
+BGEMMOTCOPY = sbgemm_tcopy_$(BGEMM_UNROLL_N)_neoversen2.c
+BGEMMINCOPYOBJ = bgemm_incopy$(TSUFFIX).$(SUFFIX)
+BGEMMITCOPYOBJ = bgemm_itcopy$(TSUFFIX).$(SUFFIX)
+BGEMMONCOPYOBJ = bgemm_oncopy$(TSUFFIX).$(SUFFIX)
+BGEMMOTCOPYOBJ = bgemm_otcopy$(TSUFFIX).$(SUFFIX)
+BGEMVTKERNEL = sbgemv_t_bfdot.c
+BGEMVNKERNEL = bgemv_n_sve_v3x4.c

SBGEMM_BETA = sbgemm_beta_neoversen2.c
SBGEMMKERNEL = sbgemm_kernel_$(SBGEMM_UNROLL_M)x$(SBGEMM_UNROLL_N)_neoversen2.c
SBGEMMINCOPY = sbgemm_ncopy_$(SBGEMM_UNROLL_M)_neoversen2.c
@@ -199,4 +213,5 @@ SBGEMMITCOPYOBJ = sbgemm_itcopy$(TSUFFIX).$(SUFFIX)
SBGEMMONCOPYOBJ = sbgemm_oncopy$(TSUFFIX).$(SUFFIX)
SBGEMMOTCOPYOBJ = sbgemm_otcopy$(TSUFFIX).$(SUFFIX)
SBGEMVTKERNEL = sbgemv_t_bfdot.c
-SBGEMVNKERNEL = sbgemv_n_neon.c
+SBGEMVNKERNEL = sbgemv_n_neon.c
+endif

kernel/arm64/KERNEL.NEOVERSEV2 (+1, -6)

@@ -1,6 +1 @@
-include $(KERNELDIR)/KERNEL.ARMV8SVE
-
-ifeq ($(BUILD_BFLOAT16), 1)
-SBGEMVTKERNEL = sbgemv_t_bfdot.c
-SBGEMVNKERNEL = sbgemv_n_neon.c
-endif
+include $(KERNELDIR)/KERNEL.NEOVERSEN2

kernel/arm64/sbgemm_kernel_8x4_neoversen2.c (+14, -4)

@@ -1,5 +1,5 @@
/***************************************************************************
-* Copyright (c) 2022, The OpenBLAS Project
+* Copyright (c) 2022,2025 The OpenBLAS Project
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -33,13 +33,23 @@
#define ALPHA_ONE
#include "sbgemm_kernel_8x4_neoversen2_impl.c"
#undef ALPHA_ONE
#undef UPDATE_C
#include "sbgemm_kernel_8x4_neoversen2_impl.c"

int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B,
FLOAT *C, BLASLONG ldc) {
-if (alpha == 1.0f)
-return sbgemm_kernel_neoversen2_alpha_one(m, n, k, alpha, A, B, C, ldc);
+#ifdef BGEMM
+bfloat16_t alpha_bf16;
+memcpy(&alpha_bf16, &alpha, sizeof(bfloat16_t));
+float alpha_f32 = vcvtah_f32_bf16(alpha_bf16);
+#else
+float alpha_f32 = alpha;
+#endif
+
+if (alpha_f32 == 1.0f)
+return gemm_kernel_neoversen2_alpha_one(m, n, k, alpha, A, B, C, ldc);
else
-return sbgemm_kernel_neoversen2_alpha(m, n, k, alpha, A, B, C, ldc);
+return gemm_kernel_neoversen2_alpha(m, n, k, alpha, A, B, C, ldc);
+
+return 0;
}
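
In `BGEMM` builds, `FLOAT` is the bfloat16 storage type, so the `alpha` argument arrives as raw bf16 bits; the `memcpy` plus `vcvtah_f32_bf16` above widens it to fp32 before the alpha == 1.0f test. A portable model of that widening (a sketch; the kernel itself uses the Arm BF16 intrinsic):

```c
#include <stdint.h>
#include <string.h>

/* bfloat16 keeps the top 16 bits of an IEEE-754 binary32, so widening
 * to float is just a 16-bit left shift of the raw bits. This models
 * what vcvtah_f32_bf16() computes, without the Arm BF16 extension. */
static float bf16_bits_to_f32(uint16_t bits) {
  uint32_t widened = (uint32_t)bits << 16;
  float out;
  memcpy(&out, &widened, sizeof(out));
  return out;
}
```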

kernel/arm64/sbgemm_kernel_8x4_neoversen2_impl.c (+144, -99)

@@ -1,5 +1,5 @@
/***************************************************************************
-* Copyright (c) 2022, The OpenBLAS Project
+* Copyright (c) 2022,2025 The OpenBLAS Project
* All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -46,49 +46,94 @@
INIT_C(3, 1); \
} while (0);

+#ifdef BGEMM
#ifdef ALPHA_ONE
-#define UPDATE_C(PG, PTR, DST, SRC) \
-do { \
-DST = svld1_f32((PG), (PTR)); \
-DST = svadd_z((PG), SRC, DST); \
-svst1_f32((PG), (PTR), DST); \
+#define UPDATE_C(PG16, PG32, PTR, SRC) \
+do { \
+tmp32 = svreinterpret_f32_u32(svld1uh_u32((PG16), (uint16_t*)PTR)); \
+tmp32 = svadd_z((PG32), SRC, tmp32); \
+tmp16 = svcvt_bf16_f32_z((PG32), tmp32); \
+tmp16 = svuzp1_bf16(tmp16, tmp16); \
+svst1_bf16((PG16), (PTR), tmp16); \
} while (0)
#else
+#define UPDATE_C(PG16, PG32, PTR, SRC) \
+do { \
+tmp32 = svreinterpret_f32_u32(svld1uh_u32((PG16), (uint16_t*)PTR)); \
+tmp32 = svmad_z((PG32), svalpha, SRC, tmp32); \
+tmp16 = svcvt_bf16_f32_z((PG32), tmp32); \
+tmp16 = svuzp1_bf16(tmp16, tmp16); \
+svst1_bf16((PG16), (PTR), tmp16); \
+} while (0)
+#endif
+#else
+#ifdef ALPHA_ONE
+#define UPDATE_C(PG16, PG32, PTR, SRC) \
+do { \
+tmp32 = svld1_f32((PG32), (PTR)); \
+tmp32 = svadd_z((PG32), SRC, tmp32); \
+svst1_f32((PG32), (PTR), tmp32); \
+} while (0);
+#else
-#define UPDATE_C(PG, PTR, DST, SRC) \
-do { \
-DST = svld1_f32((PG), (PTR)); \
-DST = svmad_z((PG), svalpha, SRC, DST); \
-svst1_f32((PG), (PTR), DST); \
+#define UPDATE_C(PG16, PG32, PTR, SRC) \
+do { \
+tmp32 = svld1_f32((PG32), (PTR)); \
+tmp32 = svmad_z((PG32), svalpha, SRC, tmp32); \
+svst1_f32((PG32), (PTR), tmp32); \
} while (0);
#endif
+#endif

+#ifdef BGEMM
+#define OUTPUT_FLOAT bfloat16_t
+#else
+#define OUTPUT_FLOAT float
+#endif

#ifdef ALPHA_ONE
-int sbgemm_kernel_neoversen2_alpha_one(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, IFLOAT * B, FLOAT * C, BLASLONG ldc)
+static int gemm_kernel_neoversen2_alpha_one(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, IFLOAT * B, FLOAT * C, BLASLONG ldc)
#else
-int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, IFLOAT * B, FLOAT * C, BLASLONG ldc)
+static int gemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT * A, IFLOAT * B, FLOAT * C, BLASLONG ldc)
#endif
{
BLASLONG pad_k = (k + 3) & ~3;

svbfloat16_t ma0, ma1, ma2, ma3, mb0, mb1;
svfloat32_t mc00, mc01, mc10, mc11, mc20, mc21, mc30, mc31,
-vc0, vc1, vc2, vc3, vc4, vc5, vc6, vc7,
-oc0, oc1, oc2, oc3, oc4, oc5, oc6, oc7;
+vc0, vc1, vc2, vc3, vc4, vc5, vc6, vc7;

#ifndef ALPHA_ONE
+#ifdef BGEMM
+bfloat16_t alpha_bf16;
+memcpy(&alpha_bf16, &alpha, sizeof(bfloat16_t));
+svfloat32_t svalpha = svdup_f32(vcvtah_f32_bf16(alpha_bf16));
+#else
svfloat32_t svalpha = svdup_f32(alpha);
+#endif
#endif

-svbool_t pg16 = svptrue_b16();
-svbool_t pg16_low = svdupq_b16(1, 1, 1, 1, 0, 0, 0, 0);
-svbool_t pg32 = svptrue_b32();
-svbool_t pg32_low = svdupq_b32(1, 1, 0, 0);
-svbool_t pg32_first = svdupq_b32(1, 0, 0, 0);
+svbool_t pg32_first_4 = svdupq_b32(1, 1, 1, 1);
+svbool_t pg32_first_2 = svdupq_b32(1, 1, 0, 0);
+svbool_t pg32_first_1 = svdupq_b32(1, 0, 0, 0);
+svbool_t pg16_first_8 = svdupq_b16(1, 1, 1, 1, 1, 1, 1, 1);
+svbool_t pg16_first_4 = svdupq_b16(1, 1, 1, 1, 0, 0, 0, 0);
+#ifdef BGEMM
+svbool_t pg16_first_2 = svdupq_b16(1, 1, 0, 0, 0, 0, 0, 0);
+svbool_t pg16_first_1 = svdupq_b16(1, 0, 0, 0, 0, 0, 0, 0);
+#endif

bfloat16_t *ptr_a = (bfloat16_t *)A;
bfloat16_t *ptr_b = (bfloat16_t *)B;
-FLOAT *ptr_c = C;
+OUTPUT_FLOAT *ptr_c = (OUTPUT_FLOAT*)C;

+bfloat16_t *ptr_a0;
+bfloat16_t *ptr_b0;
+OUTPUT_FLOAT *ptr_c0, *ptr_c1, *ptr_c2, *ptr_c3;

-bfloat16_t *ptr_a0, *ptr_a1, *ptr_a2, *ptr_a3;
-bfloat16_t *ptr_b0, *ptr_b1;
-FLOAT *ptr_c0, *ptr_c1, *ptr_c2, *ptr_c3;
+svfloat32_t tmp32;
+#ifdef BGEMM
+svbfloat16_t tmp16;
+#endif

for (BLASLONG j = 0; j < n / 4; j++) {
ptr_c0 = ptr_c;
@@ -107,13 +152,13 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
INIT_C_8x4;

for (BLASLONG p = 0; p < pad_k; p += 4) {
-ma0 = svld1_bf16(pg16, ptr_a0);
-ma1 = svld1_bf16(pg16, ptr_a0 + 8);
-ma2 = svld1_bf16(pg16, ptr_a0 + 16);
-ma3 = svld1_bf16(pg16, ptr_a0 + 24);
+ma0 = svld1_bf16(pg16_first_8, ptr_a0);
+ma1 = svld1_bf16(pg16_first_8, ptr_a0 + 8);
+ma2 = svld1_bf16(pg16_first_8, ptr_a0 + 16);
+ma3 = svld1_bf16(pg16_first_8, ptr_a0 + 24);

-mb0 = svld1_bf16(pg16, ptr_b0);
-mb1 = svld1_bf16(pg16, ptr_b0 + 8);
+mb0 = svld1_bf16(pg16_first_8, ptr_b0);
+mb1 = svld1_bf16(pg16_first_8, ptr_b0 + 8);

MATMUL(0, 0); MATMUL(0, 1);
MATMUL(1, 0); MATMUL(1, 1);
@@ -133,14 +178,14 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
vc6 = svuzp2(mc01, mc11);
vc7 = svuzp2(mc21, mc31);

-UPDATE_C(pg32, ptr_c0, oc0, vc0);
-UPDATE_C(pg32, ptr_c0+4, oc1, vc1);
-UPDATE_C(pg32, ptr_c1, oc2, vc2);
-UPDATE_C(pg32, ptr_c1+4, oc3, vc3);
-UPDATE_C(pg32, ptr_c2, oc4, vc4)
-UPDATE_C(pg32, ptr_c2+4, oc5, vc5);
-UPDATE_C(pg32, ptr_c3, oc6, vc6)
-UPDATE_C(pg32, ptr_c3+4, oc7, vc7);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c0, vc0);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c0+4, vc1);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c1, vc2);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c1+4, vc3);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c2, vc4);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c2+4, vc5);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c3, vc6);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c3+4, vc7);

ptr_c0 += 8;
ptr_c1 += 8;
@@ -157,10 +202,10 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
INIT_C(1, 0); INIT_C(1, 1);

for (BLASLONG p = 0; p < pad_k; p += 4) {
-ma0 = svld1_bf16(pg16, ptr_a0);
-ma1 = svld1_bf16(pg16, ptr_a0 + 8);
-mb0 = svld1_bf16(pg16, ptr_b0);
-mb1 = svld1_bf16(pg16, ptr_b0 + 8);
+ma0 = svld1_bf16(pg16_first_8, ptr_a0);
+ma1 = svld1_bf16(pg16_first_8, ptr_a0 + 8);
+mb0 = svld1_bf16(pg16_first_8, ptr_b0);
+mb1 = svld1_bf16(pg16_first_8, ptr_b0 + 8);

MATMUL(0, 0); MATMUL(0, 1);
MATMUL(1, 0); MATMUL(1, 1);
@@ -174,10 +219,10 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
vc2 = svuzp1(mc01, mc11);
vc3 = svuzp2(mc01, mc11);

-UPDATE_C(pg32, ptr_c0, oc0, vc0);
-UPDATE_C(pg32, ptr_c1, oc1, vc1);
-UPDATE_C(pg32, ptr_c2, oc2, vc2);
-UPDATE_C(pg32, ptr_c3, oc3, vc3);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c0, vc0);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c1, vc1);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c2, vc2);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c3, vc3);

ptr_c0 += 4;
ptr_c1 += 4;
@@ -192,9 +237,9 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp

INIT_C(0, 0); INIT_C(0, 1);
for (BLASLONG p = 0; p < pad_k; p += 4) {
-ma0 = svld1_bf16(pg16, ptr_a0);
-mb0 = svld1_bf16(pg16, ptr_b0);
-mb1 = svld1_bf16(pg16, ptr_b0 + 8);
+ma0 = svld1_bf16(pg16_first_8, ptr_a0);
+mb0 = svld1_bf16(pg16_first_8, ptr_b0);
+mb1 = svld1_bf16(pg16_first_8, ptr_b0 + 8);

MATMUL(0, 0); MATMUL(0, 1);

@@ -207,10 +252,10 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
vc2 = svuzp1(mc01, mc01);
vc3 = svuzp2(mc01, mc01);

-UPDATE_C(pg32_low, ptr_c0, oc0, vc0);
-UPDATE_C(pg32_low, ptr_c1, oc1, vc1);
-UPDATE_C(pg32_low, ptr_c2, oc2, vc2);
-UPDATE_C(pg32_low, ptr_c3, oc3, vc3);
+UPDATE_C(pg16_first_2, pg32_first_2, ptr_c0, vc0);
+UPDATE_C(pg16_first_2, pg32_first_2, ptr_c1, vc1);
+UPDATE_C(pg16_first_2, pg32_first_2, ptr_c2, vc2);
+UPDATE_C(pg16_first_2, pg32_first_2, ptr_c3, vc3);

ptr_c0 += 2;
ptr_c1 += 2;
@@ -224,9 +269,9 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp

INIT_C(0, 0); INIT_C(0, 1);
for (BLASLONG p = 0; p < pad_k; p += 4) {
-ma0 = svld1_bf16(pg16_low, ptr_a0);
-mb0 = svld1_bf16(pg16, ptr_b0);
-mb1 = svld1_bf16(pg16, ptr_b0 + 8);
+ma0 = svld1_bf16(pg16_first_4, ptr_a0);
+mb0 = svld1_bf16(pg16_first_8, ptr_b0);
+mb1 = svld1_bf16(pg16_first_8, ptr_b0 + 8);

MATMUL(0, 0); MATMUL(0, 1);

@@ -237,10 +282,10 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
vc1 = svuzp2(mc00, mc00);
vc3 = svuzp2(mc01, mc01);

-UPDATE_C(pg32_first, ptr_c0, oc0, mc00);
-UPDATE_C(pg32_first, ptr_c1, oc1, vc1);
-UPDATE_C(pg32_first, ptr_c2, oc2, mc01);
-UPDATE_C(pg32_first, ptr_c3, oc3, vc3);
+UPDATE_C(pg16_first_1, pg32_first_1, ptr_c0, mc00);
+UPDATE_C(pg16_first_1, pg32_first_1, ptr_c1, vc1);
+UPDATE_C(pg16_first_1, pg32_first_1, ptr_c2, mc01);
+UPDATE_C(pg16_first_1, pg32_first_1, ptr_c3, vc3);

}

@@ -265,12 +310,12 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
INIT_C(3, 0);

for (BLASLONG p = 0; p < pad_k; p += 4) {
-ma0 = svld1_bf16(pg16, ptr_a0);
-ma1 = svld1_bf16(pg16, ptr_a0 + 8);
-ma2 = svld1_bf16(pg16, ptr_a0 + 16);
-ma3 = svld1_bf16(pg16, ptr_a0 + 24);
+ma0 = svld1_bf16(pg16_first_8, ptr_a0);
+ma1 = svld1_bf16(pg16_first_8, ptr_a0 + 8);
+ma2 = svld1_bf16(pg16_first_8, ptr_a0 + 16);
+ma3 = svld1_bf16(pg16_first_8, ptr_a0 + 24);

-mb0 = svld1_bf16(pg16, ptr_b0);
+mb0 = svld1_bf16(pg16_first_8, ptr_b0);

MATMUL(0, 0);
MATMUL(1, 0);
@@ -286,10 +331,10 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
vc2 = svuzp2(mc00, mc10);
vc3 = svuzp2(mc20, mc30);

-UPDATE_C(pg32, ptr_c0, oc0, vc0);
-UPDATE_C(pg32, ptr_c0 + 4, oc1, vc1);
-UPDATE_C(pg32, ptr_c1, oc2, vc2);
-UPDATE_C(pg32, ptr_c1 + 4, oc3, vc3);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c0, vc0);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c0 + 4, vc1);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c1, vc2);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c1 + 4, vc3);

ptr_c0 += 8;
ptr_c1 += 8;
@@ -304,9 +349,9 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
INIT_C(1, 0);

for (BLASLONG p = 0; p < pad_k; p += 4) {
-ma0 = svld1_bf16(pg16, ptr_a0);
-ma1 = svld1_bf16(pg16, ptr_a0 + 8);
-mb0 = svld1_bf16(pg16, ptr_b0);
+ma0 = svld1_bf16(pg16_first_8, ptr_a0);
+ma1 = svld1_bf16(pg16_first_8, ptr_a0 + 8);
+mb0 = svld1_bf16(pg16_first_8, ptr_b0);
MATMUL(0, 0);
MATMUL(1, 0);
ptr_a0 += 16;
@@ -316,8 +361,8 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
vc0 = svuzp1(mc00, mc10);
vc1 = svuzp2(mc00, mc10);

-UPDATE_C(pg32, ptr_c0, oc0, vc0);
-UPDATE_C(pg32, ptr_c1, oc1, vc1);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c0, vc0);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c1, vc1);

ptr_c0 += 4;
ptr_c1 += 4;
@@ -331,8 +376,8 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
INIT_C(0, 0);

for (BLASLONG p = 0; p < pad_k; p += 4) {
-ma0 = svld1_bf16(pg16, ptr_a0);
-mb0 = svld1_bf16(pg16, ptr_b0);
+ma0 = svld1_bf16(pg16_first_8, ptr_a0);
+mb0 = svld1_bf16(pg16_first_8, ptr_b0);

MATMUL(0, 0);

@@ -342,8 +387,8 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp

vc0 = svuzp1(mc00, mc00);
vc1 = svuzp2(mc00, mc00);
-UPDATE_C(pg32_low, ptr_c0, oc0, vc0);
-UPDATE_C(pg32_low, ptr_c1, oc1, vc1);
+UPDATE_C(pg16_first_2, pg32_first_2, ptr_c0, vc0);
+UPDATE_C(pg16_first_2, pg32_first_2, ptr_c1, vc1);

ptr_c0 += 2;
ptr_c1 += 2;
@@ -355,16 +400,16 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
ptr_b0 = ptr_b;
INIT_C(0, 0);
for (BLASLONG p = 0; p < pad_k; p += 4) {
-ma0 = svld1_bf16(pg16_low, ptr_a0);
-mb0 = svld1_bf16(pg16, ptr_b0);
+ma0 = svld1_bf16(pg16_first_4, ptr_a0);
+mb0 = svld1_bf16(pg16_first_8, ptr_b0);
MATMUL(0, 0);
ptr_a0 += 4;
ptr_b0 += 8;
}
vc1 = svuzp2(mc00, mc00);

-UPDATE_C(pg32_first, ptr_c0, oc0, mc00);
-UPDATE_C(pg32_first, ptr_c1, oc1, vc1);
+UPDATE_C(pg16_first_1, pg32_first_1, ptr_c0, mc00);
+UPDATE_C(pg16_first_1, pg32_first_1, ptr_c1, vc1);
}

ptr_b += 2 * pad_k;
@@ -386,12 +431,12 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
INIT_C(3, 0);

for (BLASLONG p = 0; p < pad_k; p += 4) {
-ma0 = svld1_bf16(pg16, ptr_a0);
-ma1 = svld1_bf16(pg16, ptr_a0 + 8);
-ma2 = svld1_bf16(pg16, ptr_a0 + 16);
-ma3 = svld1_bf16(pg16, ptr_a0 + 24);
+ma0 = svld1_bf16(pg16_first_8, ptr_a0);
+ma1 = svld1_bf16(pg16_first_8, ptr_a0 + 8);
+ma2 = svld1_bf16(pg16_first_8, ptr_a0 + 16);
+ma3 = svld1_bf16(pg16_first_8, ptr_a0 + 24);

-mb0 = svld1_bf16(pg16_low, ptr_b0);
+mb0 = svld1_bf16(pg16_first_4, ptr_b0);

MATMUL(0, 0);
MATMUL(1, 0);
@@ -405,8 +450,8 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
vc0 = svuzp1(mc00, mc10);
vc1 = svuzp1(mc20, mc30);

-UPDATE_C(pg32, ptr_c0, oc0, vc0);
-UPDATE_C(pg32, ptr_c0 + 4, oc1, vc1);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c0, vc0);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c0 + 4, vc1);

ptr_c0 += 8;
}
@@ -418,16 +463,16 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
INIT_C(0, 0);
INIT_C(1, 0);
for (BLASLONG p = 0; p < pad_k; p += 4) {
-ma0 = svld1_bf16(pg16, ptr_a0);
-ma1 = svld1_bf16(pg16, ptr_a0 + 8);
-mb0 = svld1_bf16(pg16_low, ptr_b0);
+ma0 = svld1_bf16(pg16_first_8, ptr_a0);
+ma1 = svld1_bf16(pg16_first_8, ptr_a0 + 8);
+mb0 = svld1_bf16(pg16_first_4, ptr_b0);
MATMUL(0, 0);
MATMUL(1, 0);
ptr_a0 += 16;
ptr_b0 += 4;
}
vc0 = svuzp1(mc00, mc10);
-UPDATE_C(pg32, ptr_c0, oc0, vc0);
+UPDATE_C(pg16_first_4, pg32_first_4, ptr_c0, vc0);
ptr_c0 += 4;
}

@@ -439,8 +484,8 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
INIT_C(0, 0);

for (BLASLONG p = 0; p < pad_k; p += 4) {
-ma0 = svld1_bf16(pg16, ptr_a0);
-mb0 = svld1_bf16(pg16_low, ptr_b0);
+ma0 = svld1_bf16(pg16_first_8, ptr_a0);
+mb0 = svld1_bf16(pg16_first_4, ptr_b0);

MATMUL(0, 0);

@@ -448,7 +493,7 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
ptr_b0 += 4;
}
vc0 = svuzp1(mc00, mc00);
-UPDATE_C(pg32_low, ptr_c0, oc0, vc0);
+UPDATE_C(pg16_first_2, pg32_first_2, ptr_c0, vc0);
ptr_c0 += 2;
}

@@ -457,13 +502,13 @@ int sbgemm_kernel_neoversen2_alpha(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alp
ptr_b0 = ptr_b;
INIT_C(0, 0);
for (BLASLONG p = 0; p < pad_k; p += 4) {
-ma0 = svld1_bf16(pg16_low, ptr_a0);
-mb0 = svld1_bf16(pg16_low, ptr_b0);
+ma0 = svld1_bf16(pg16_first_4, ptr_a0);
+mb0 = svld1_bf16(pg16_first_4, ptr_b0);
MATMUL(0, 0);
ptr_a0 += 4;
ptr_b0 += 4;
}
-UPDATE_C(pg32_first, ptr_c0, oc0, mc00);
+UPDATE_C(pg16_first_1, pg32_first_1, ptr_c0, mc00);
}
}
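
In the `BGEMM` variants of `UPDATE_C` above, C is read as 16-bit elements, accumulated in fp32 lanes, then narrowed back with `svcvt_bf16_f32_z` and `svuzp1_bf16` before the 16-bit store. A scalar model of the read-modify-write one lane performs (a sketch that ignores the SVE predication; the round-to-nearest-even narrowing is an assumption about the conversion's rounding, and NaNs are not treated specially):

```c
#include <stdint.h>
#include <string.h>

/* Scalar model of one lane of the BGEMM UPDATE_C: widen the stored
 * bf16 C element to fp32, accumulate alpha * acc, then narrow back. */
static uint16_t f32_to_bf16(float x) {
  uint32_t bits;
  memcpy(&bits, &x, sizeof(bits));
  bits += 0x7FFFu + ((bits >> 16) & 1u); /* round to nearest even */
  return (uint16_t)(bits >> 16);
}

static void update_c_lane(uint16_t *c, float acc, float alpha) {
  uint32_t widened = (uint32_t)(*c) << 16; /* bf16 -> fp32 */
  float cf;
  memcpy(&cf, &widened, sizeof(cf));
  *c = f32_to_bf16(alpha * acc + cf);      /* multiply-add, then narrow */
}
```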



param.h (+8, -2)

@@ -3644,11 +3644,17 @@ is a big desktop or server with abundant cache rather than a phone or embedded d
#define SWITCH_RATIO 16
#endif

-#undef SBGEMM_ALIGN_K
-#define SBGEMM_ALIGN_K 4
+#undef BGEMM_ALIGN_K
+#undef BGEMM_DEFAULT_UNROLL_M
+#undef BGEMM_DEFAULT_UNROLL_N
+#define BGEMM_ALIGN_K 4
+#define BGEMM_DEFAULT_UNROLL_M 8
+#define BGEMM_DEFAULT_UNROLL_N 4

+#undef SBGEMM_ALIGN_K
#undef SBGEMM_DEFAULT_UNROLL_M
#undef SBGEMM_DEFAULT_UNROLL_N
+#define SBGEMM_ALIGN_K 4
#define SBGEMM_DEFAULT_UNROLL_M 8
#define SBGEMM_DEFAULT_UNROLL_N 4



test/compare_sgemm_bgemm.c (+4, -4)

@@ -100,7 +100,7 @@ main (int argc, char *argv[])

SGEMM (&transA, &transB, &m, &n, &k, &alpha, A,
&m, B, &k, &beta, C, &m);
-BGEMM (&transA, &transB, &m, &n, &k, &alpha_bf16, (bfloat16*) AA,
+BGEMM (&transA, &transB, &m, &n, &k, &alpha_bf16, (bfloat16*)AA,
&m, (bfloat16*)BB, &k, &beta_bf16, (bfloat16*)CC, &m);

for (i = 0; i < n; i++)
@@ -126,15 +126,15 @@ main (int argc, char *argv[])
}
if (!is_close(float16to32(CC[i * m + j]), truncate_float32_to_bfloat16(C[i * m + j]), 0.01, 0.001)) {
#ifdef DEBUG
printf("Mismatch at i=%d, j=%d, k=%ld: CC=%.6f, C=%.6f\n",
printf("Mismatch at i=%d, j=%d, k=%d: CC=%.6f, C=%.6f\n",
i, j, k, float16to32(CC[i * m + j]), truncate_float32_to_bfloat16(C[i * m + j]));
#endif
ret++;
}

-if (!is_close(float16to32(CC[i * m + j]), truncate_float32_to_bfloat16(DD[i * m + j]), 0.0001, 0.00001)) {
+if (!is_close(float16to32(CC[i * m + j]), truncate_float32_to_bfloat16(DD[i * m + j]), 0.01, 0.001)) {
#ifdef DEBUG
printf("Mismatch at i=%d, j=%d, k=%ld: CC=%.6f, DD=%.6f\n",
printf("Mismatch at i=%d, j=%d, k=%d: CC=%.6f, DD=%.6f\n",
i, j, k, float16to32(CC[i * m + j]), truncate_float32_to_bfloat16(DD[i * m + j]));
#endif
ret++;
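
The loosened `DD` tolerance (0.01 relative, 0.001 absolute, now matching the `C` comparison) reflects that BGEMM rounds its result back to bf16 storage. `is_close` itself is defined in the test sources; a sketch of the mixed relative/absolute check it presumably implements:

```c
#include <math.h>

/* Sketch of a mixed relative/absolute closeness test like the
 * is_close(a, b, rtol, atol) calls above; the real helper lives in
 * the test file and may differ in detail. */
static int is_close_sketch(double a, double b, double rtol, double atol) {
  return fabs(a - b) <= atol + rtol * fmax(fabs(a), fabs(b));
}
```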


test/compare_sgemv_bgemv.c (+2, -2)

@@ -119,7 +119,7 @@ int main(int argc, char *argv[])
if (!is_close(float16to32(CC[j << l]), truncate_float32_to_bfloat16(C[j << l]), 0.01, 0.001))
{
#ifdef DEBUG
printf("Mismatch at trans=%c, alpha=%.2f, beta=%.2f, i=%d, j=%d, k=%ld: CC=%.6f, C=%.6f\n",
printf("Mismatch at trans=%c, alpha=%.2f, beta=%.2f, i=%d, j=%d, k=%d: CC=%.6f, C=%.6f\n",
transA, alpha, beta, i, j, k, float16to32(CC[j << l]), truncate_float32_to_bfloat16(C[j << l]));
#endif
ret++;
@@ -127,7 +127,7 @@ int main(int argc, char *argv[])
if (!is_close(float16to32(CC[j << l]), truncate_float32_to_bfloat16(DD[j]), 0.001, 0.0001))
{
#ifdef DEBUG
printf("Mismatch at trans=%c, alpha=%.2f, beta=%.2f, i=%d, j=%d, k=%ld: CC=%.6f, DD=%.6f\n",
printf("Mismatch at trans=%c, alpha=%.2f, beta=%.2f, i=%d, j=%d, k=%d: CC=%.6f, DD=%.6f\n",
transA, alpha, beta, i, j, k, float16to32(CC[j << l]), truncate_float32_to_bfloat16(DD[j]));
#endif
ret++;

