Optimize SBGEMM / BGEMM for NEOVERSEV1 further (pull/4054/merge)
@@ -34,15 +34,15 @@ SGEMVTKERNEL = gemv_t_sve_v1x3.c
 DGEMVTKERNEL = gemv_t_sve_v1x3.c
 ifeq ($(BUILD_BFLOAT16), 1)
 BGEMM_BETA = bgemm_beta_neon.c
-BGEMMKERNEL = bgemm_kernel_$(BGEMM_UNROLL_M)x$(BGEMM_UNROLL_N)_neoversev1.c
+BGEMMKERNEL = bgemm_kernel_2vlx4_neoversev1.c
 ifneq ($(BGEMM_UNROLL_M), $(BGEMM_UNROLL_N))
-BGEMMINCOPY = sbgemm_ncopy_$(SBGEMM_UNROLL_M)_neoversev1.c
-BGEMMITCOPY = sbgemm_tcopy_$(SBGEMM_UNROLL_M)_neoversev1.c
+BGEMMINCOPY = bgemm_ncopy_2vl_neoversev1.c
+BGEMMITCOPY = bgemm_tcopy_2vl_neoversev1.c
 BGEMMINCOPYOBJ = bgemm_incopy$(TSUFFIX).$(SUFFIX)
 BGEMMITCOPYOBJ = bgemm_itcopy$(TSUFFIX).$(SUFFIX)
 endif
-BGEMMONCOPY = sbgemm_ncopy_$(BGEMM_UNROLL_N)_neoversev1.c
-BGEMMOTCOPY = sbgemm_tcopy_$(BGEMM_UNROLL_N)_neoversev1.c
+BGEMMONCOPY = bgemm_ncopy_4_neoversev1.c
+BGEMMOTCOPY = bgemm_tcopy_4_neoversev1.c
 BGEMMONCOPYOBJ = bgemm_oncopy$(TSUFFIX).$(SUFFIX)
 BGEMMOTCOPYOBJ = bgemm_otcopy$(TSUFFIX).$(SUFFIX)
@@ -50,15 +50,15 @@ BGEMVTKERNEL = sbgemv_t_bfdot.c
 BGEMVNKERNEL = bgemv_n_sve_v3x4.c
 SBGEMM_BETA = sbgemm_beta_neoversev1.c
-SBGEMMKERNEL = sbgemm_kernel_$(SBGEMM_UNROLL_M)x$(SBGEMM_UNROLL_N)_neoversev1.c
+SBGEMMKERNEL = bgemm_kernel_2vlx4_neoversev1.c
 ifneq ($(SBGEMM_UNROLL_M), $(SBGEMM_UNROLL_N))
-SBGEMMINCOPY = sbgemm_ncopy_$(SBGEMM_UNROLL_M)_neoversev1.c
-SBGEMMITCOPY = sbgemm_tcopy_$(SBGEMM_UNROLL_M)_neoversev1.c
+SBGEMMINCOPY = bgemm_ncopy_2vl_neoversev1.c
+SBGEMMITCOPY = bgemm_tcopy_2vl_neoversev1.c
 SBGEMMINCOPYOBJ = sbgemm_incopy$(TSUFFIX).$(SUFFIX)
 SBGEMMITCOPYOBJ = sbgemm_itcopy$(TSUFFIX).$(SUFFIX)
 endif
-SBGEMMONCOPY = sbgemm_ncopy_$(SBGEMM_UNROLL_N)_neoversev1.c
-SBGEMMOTCOPY = sbgemm_tcopy_$(SBGEMM_UNROLL_N)_neoversev1.c
+SBGEMMONCOPY = bgemm_ncopy_4_neoversev1.c
+SBGEMMOTCOPY = bgemm_tcopy_4_neoversev1.c
 SBGEMMONCOPYOBJ = sbgemm_oncopy$(TSUFFIX).$(SUFFIX)
 SBGEMMOTCOPYOBJ = sbgemm_otcopy$(TSUFFIX).$(SUFFIX)
@@ -32,20 +32,26 @@
 #include "common.h"
 #define ALPHA_ONE
-#include "bgemm_kernel_4x4_neoversev1_impl.c"
+#include "bgemm_kernel_2vlx4_neoversev1_impl.c"
 #undef ALPHA_ONE
 #undef UPDATE_C
-#include "bgemm_kernel_4x4_neoversev1_impl.c"
+#undef UPDATE_C2
+#undef UPDATE_C1
+#include "bgemm_kernel_2vlx4_neoversev1_impl.c"
 int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B,
           FLOAT *C, BLASLONG ldc) {
+#ifdef BGEMM
   bfloat16_t alpha_bf16;
   memcpy(&alpha_bf16, &alpha, sizeof(bfloat16_t));
   float alpha_f32 = vcvtah_f32_bf16(alpha_bf16);
+#else
+  float alpha_f32 = alpha;
+#endif
   if (alpha_f32 == 1.0f)
-    return bgemm_kernel_neoversev1_alpha_one(m, n, k, alpha, A, B, C, ldc);
+    return bgemm_kernel_neoversev1_alpha_one(m, n, k, alpha_f32, A, B, C, ldc);
   else
-    return bgemm_kernel_neoversev1_alpha(m, n, k, alpha, A, B, C, ldc);
+    return bgemm_kernel_neoversev1_alpha(m, n, k, alpha_f32, A, B, C, ldc);
   return 0;
 }
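For the BGEMM interface, alpha reaches this wrapper as raw bfloat16 bits carried in a FLOAT argument, so the new code first recovers those bits with memcpy and widens them with vcvtah_f32_bf16 before comparing against 1.0f; for SBGEMM, alpha is already a float. A minimal standalone sketch of that round trip (my illustration, not part of the patch; assumes a little-endian AArch64 target and a bf16-capable compiler, e.g. -march=armv8.6-a+bf16):

#include <arm_neon.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  /* Pretend the caller handed us alpha = 1.0 as bf16 bits (0x3F80)
     carried in the low bytes of a float argument. */
  float boxed = 0.0f;
  uint16_t bits = 0x3F80; /* bf16 encoding of 1.0 */
  memcpy(&boxed, &bits, sizeof(bits));

  /* What the wrapper does: peel the 16 bits back out ... */
  bfloat16_t alpha_bf16;
  memcpy(&alpha_bf16, &boxed, sizeof(bfloat16_t));
  /* ... and widen to f32 for the scalar compare and the kernel call. */
  float alpha_f32 = vcvtah_f32_bf16(alpha_bf16);

  printf("alpha = %f\n", alpha_f32); /* prints 1.000000 */
  return 0;
}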
@@ -0,0 +1,437 @@
/***************************************************************************
 * Copyright (c) 2025, The OpenBLAS Project
 * All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of the OpenBLAS project nor the names of
 *    its contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * *****************************************************************************/

#include <arm_sve.h>
#include <arm_neon.h>

#include "common.h"

#ifdef BGEMM
#ifdef ALPHA_ONE
#define TO16 vcvth_bf16_f32
#define TO32 vcvtah_f32_bf16
#define UPDATE_C(PG, PTR, DST, SRC) \
  do { \
    DST = svreinterpret_f32_u32(svld1uh_u32((pghalf), (uint16_t *)PTR)); \
    DST = svadd_z((PG), SRC, DST); \
    svtmp16 = svcvt_bf16_f32_z((PG), DST); \
    svtmp16 = svuzp1_bf16(svtmp16, svtmp16); \
    svst1_bf16((pghalf), (PTR), svtmp16); \
  } while (0);
#define UPDATE_C2(ptr, tmp, vector) \
  *(ptr) = TO16(vector[0] + TO32(*ptr)); \
  *(ptr + 1) = TO16(vector[1] + TO32(*(ptr + 1)));
#define UPDATE_C1(ptr, value) *ptr = TO16(TO32(*ptr) + (value))
#else
#define UPDATE_C(PG, PTR, DST, SRC) \
  do { \
    DST = svreinterpret_f32_u32(svld1uh_u32((pghalf), (uint16_t *)PTR)); \
    DST = svmad_z((PG), svalpha, SRC, DST); \
    svtmp16 = svcvt_bf16_f32_z((PG), DST); \
    svtmp16 = svuzp1_bf16(svtmp16, svtmp16); \
    svst1_bf16((pghalf), (PTR), svtmp16); \
  } while (0);
#define UPDATE_C2(ptr, tmp, vector) \
  *(ptr) = TO16(vector[0] * alpha + TO32(*ptr)); \
  *(ptr + 1) = TO16(vector[1] * alpha + TO32(*(ptr + 1)));
#define UPDATE_C1(ptr, value) *ptr = TO16(TO32(*ptr) + (value) * alpha)
#endif
#else
#ifdef ALPHA_ONE
#define UPDATE_C(PG, PTR, DST, SRC) \
  do { \
    DST = svld1_f32((PG), (PTR)); \
    DST = svadd_z((PG), SRC, DST); \
    svst1_f32((PG), (PTR), DST); \
  } while (0);
#define UPDATE_C2(ptr, tmp, vector) \
  tmp = vld1_f32(ptr); \
  tmp = vadd_f32(vector, tmp); \
  vst1_f32(ptr, tmp);
#define UPDATE_C1(ptr, value) *ptr = *ptr + (value)
#else
#define UPDATE_C(PG, PTR, DST, SRC) \
  do { \
    DST = svld1_f32((PG), (PTR)); \
    DST = svmad_z((PG), svalpha, SRC, DST); \
    svst1_f32((PG), (PTR), DST); \
  } while (0);
#define UPDATE_C2(ptr, tmp, vector) \
  tmp = vld1_f32(ptr); \
  tmp = vmla_n_f32(tmp, vector, alpha); \
  vst1_f32(ptr, tmp);
#define UPDATE_C1(ptr, value) *ptr = *ptr + (value) * alpha
#endif
#endif

#ifdef BGEMM
#define OUTPUT_FLOAT bfloat16_t
#else
#define OUTPUT_FLOAT float
#endif
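/* One implementation file yields all four kernel variants: the BGEMM /
   SBGEMM switch selects the C element type (bfloat16 vs float) and the
   matching UPDATE_C* macros, while the ALPHA_ONE switch drops the alpha
   multiply from the C update. */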
#ifdef ALPHA_ONE
static int bgemm_kernel_neoversev1_alpha_one(BLASLONG m, BLASLONG n, BLASLONG k,
                                             float alpha, IFLOAT *AA, IFLOAT *BB,
                                             FLOAT *CC, BLASLONG ldc)
#else
static int bgemm_kernel_neoversev1_alpha(BLASLONG m, BLASLONG n, BLASLONG k,
                                         float alpha, IFLOAT *AA, IFLOAT *BB,
                                         FLOAT *CC, BLASLONG ldc)
#endif
{
  BLASLONG pad_k = (k + 3) & ~3;
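  /* Packed panels are padded in K up to a multiple of 4: each BFMMLA step
     consumes 4 bf16 K-elements per row, and the copy kernels zero-fill the
     tail accordingly. */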
#ifndef ALPHA_ONE
  svfloat32_t svalpha = svdup_f32(alpha);
#endif
  bfloat16_t *ptr_a = (bfloat16_t *)AA;
  bfloat16_t *ptr_b = (bfloat16_t *)BB;
  OUTPUT_FLOAT *ptr_c = (OUTPUT_FLOAT *)CC;
  bfloat16_t *ptr_a0;
  bfloat16_t *ptr_b0;
  OUTPUT_FLOAT *ptr_c0, *ptr_c1, *ptr_c2, *ptr_c3;
  svfloat32_t tmp0, tmp1, tmp2, tmp3;
#ifdef BGEMM
  svbfloat16_t svtmp16;
#else
  float32x2_t tmp4, tmp5, tmp6, tmp7;
#endif
  const int sve_size_bf16 = svcnth();
  const int num_accumulators = sve_size_bf16 >> 1;
  svbool_t pgtrue = svptrue_b16();
#ifdef BGEMM
  // For BF16 load/store we use half the vector size
  svbool_t pghalf = svwhilelt_b16(0, num_accumulators);
#endif
  // N values are 4x2 packed matrices
  int n_step = 0;
  const int n2 = n & -2;
  const int n4 = n & -4;
  // For 256-bit this would be 8
  const int m_acc = (m & -num_accumulators);
  const int m2 = m & -2;
  for (; n_step < n4; n_step += 4) {
    ptr_a = (bfloat16_t *)AA;
    ptr_c0 = ptr_c;
    ptr_c1 = ptr_c0 + ldc;
    ptr_c2 = ptr_c1 + ldc;
    ptr_c3 = ptr_c2 + ldc;
    ptr_c += 4 * ldc;
    int m_step = 0;
    for (; m_step < m_acc; m_step += num_accumulators) {
      svfloat32_t acc0 = svdup_f32(0);
      svfloat32_t acc1 = svdup_f32(0);
      svfloat32_t acc2 = svdup_f32(0);
      svfloat32_t acc3 = svdup_f32(0);
      ptr_a0 = ptr_a;
      ptr_b0 = ptr_b;
      ptr_a += num_accumulators * pad_k;
      // Load entire 2VL block
      for (BLASLONG p = 0; p < pad_k; p += 4) {
        svbfloat16_t ma0 = svld1_bf16(pgtrue, ptr_a0);
        svbfloat16_t ma1 = svld1_bf16(pgtrue, ptr_a0 + sve_size_bf16);
        svbfloat16_t mb0 = svld1rq_bf16(pgtrue, ptr_b0);
        svbfloat16_t mb1 = svld1rq_bf16(pgtrue, ptr_b0 + 8);
        acc0 = svbfmmla_f32(acc0, mb0, ma0);
        acc1 = svbfmmla_f32(acc1, mb0, ma1);
        acc2 = svbfmmla_f32(acc2, mb1, ma0);
        acc3 = svbfmmla_f32(acc3, mb1, ma1);
        ptr_a0 += sve_size_bf16 * 2;
        ptr_b0 += 16;
      }
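      // BFMMLA produces a 2x2 tile of C per 128-bit segment, so two C rows
      // sit interleaved as 64-bit pairs; the 64-bit uzp1/uzp2 below
      // deinterleave the accumulators into one contiguous vector per C row.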
      svfloat32_t out0 = svreinterpret_f32_u64(svuzp1_u64(svreinterpret_u64_f32(acc0), svreinterpret_u64_f32(acc1)));
      svfloat32_t out1 = svreinterpret_f32_u64(svuzp2_u64(svreinterpret_u64_f32(acc0), svreinterpret_u64_f32(acc1)));
      svfloat32_t out2 = svreinterpret_f32_u64(svuzp1_u64(svreinterpret_u64_f32(acc2), svreinterpret_u64_f32(acc3)));
      svfloat32_t out3 = svreinterpret_f32_u64(svuzp2_u64(svreinterpret_u64_f32(acc2), svreinterpret_u64_f32(acc3)));
      UPDATE_C(pgtrue, ptr_c0, tmp0, out0);
      UPDATE_C(pgtrue, ptr_c1, tmp1, out1);
      UPDATE_C(pgtrue, ptr_c2, tmp2, out2);
      UPDATE_C(pgtrue, ptr_c3, tmp3, out3);
      ptr_c0 += num_accumulators;
      ptr_c1 += num_accumulators;
      ptr_c2 += num_accumulators;
      ptr_c3 += num_accumulators;
    }
    for (; m_step < m2; m_step += 2) {
      float32x4_t acc0 = {0, 0, 0, 0};
      float32x4_t acc1 = {0, 0, 0, 0};
      ptr_a0 = ptr_a;
      ptr_b0 = ptr_b;
      ptr_a += 2 * pad_k;
      for (BLASLONG p = 0; p < pad_k; p += 4) {
        bfloat16x8_t ma0 = vld1q_bf16(ptr_a0);
        bfloat16x8_t mb0 = vld1q_bf16(ptr_b0);
        bfloat16x8_t mb1 = vld1q_bf16(ptr_b0 + 8);
        acc0 = vbfmmlaq_f32(acc0, mb0, ma0);
        acc1 = vbfmmlaq_f32(acc1, mb1, ma0);
        ptr_a0 += 8;
        ptr_b0 += 16;
      }
      UPDATE_C2(ptr_c0, tmp4, vget_low_f32(acc0));
      UPDATE_C2(ptr_c1, tmp5, vget_high_f32(acc0));
      UPDATE_C2(ptr_c2, tmp6, vget_low_f32(acc1));
      UPDATE_C2(ptr_c3, tmp7, vget_high_f32(acc1));
      ptr_c0 += 2;
      ptr_c1 += 2;
      ptr_c2 += 2;
      ptr_c3 += 2;
    }
    // Final row is always a contiguous single row
    if (m & 1) {
      ptr_a0 = ptr_a;
      ptr_b0 = ptr_b;
      float32x4_t acc0 = {0, 0, 0, 0};
      float32x4_t acc1 = {0, 0, 0, 0};
      for (BLASLONG p = 0; p < pad_k; p += 4) {
        // Same A value can be used for both B values
        bfloat16x8_t ma0 = vreinterpretq_bf16_u64(vdupq_n_u64(*((uint64_t *)ptr_a0)));
        bfloat16x8_t mb0 = vld1q_bf16(ptr_b0);
        bfloat16x8_t mb1 = vld1q_bf16(ptr_b0 + 8);
        acc0 = vbfmmlaq_f32(acc0, mb0, ma0);
        acc1 = vbfmmlaq_f32(acc1, mb1, ma0);
        ptr_a0 += 4;
        ptr_b0 += 16;
      }
      UPDATE_C1(ptr_c0, acc0[1]);
      UPDATE_C1(ptr_c1, acc0[3]);
      UPDATE_C1(ptr_c2, acc1[1]);
      UPDATE_C1(ptr_c3, acc1[3]);
    }
    ptr_b += 4 * pad_k;
  }
  for (; n_step < n2; n_step += 2) {
    ptr_a = (bfloat16_t *)AA;
    ptr_c0 = ptr_c;
    ptr_c1 = ptr_c0 + ldc;
    ptr_c += 2 * ldc;
    // Pairs of rows are packed contiguously
    int m_step = 0;
    for (; m_step < m_acc; m_step += num_accumulators) {
      svfloat32_t acc0 = svdup_f32(0);
      svfloat32_t acc1 = svdup_f32(0);
      ptr_a0 = ptr_a;
      ptr_b0 = ptr_b;
      ptr_a += num_accumulators * pad_k;
      // Load entire 2VL block
      for (BLASLONG p = 0; p < pad_k; p += 4) {
        svbfloat16_t ma0 = svld1_bf16(pgtrue, ptr_a0);
        svbfloat16_t ma1 = svld1_bf16(pgtrue, ptr_a0 + sve_size_bf16);
        svbfloat16_t mb0 = svld1rq_bf16(pgtrue, ptr_b0);
        acc0 = svbfmmla_f32(acc0, mb0, ma0);
        acc1 = svbfmmla_f32(acc1, mb0, ma1);
        ptr_a0 += sve_size_bf16 * 2;
        ptr_b0 += 8;
      }
      svfloat32_t out0 = svreinterpret_f32_u64(svuzp1_u64(svreinterpret_u64_f32(acc0), svreinterpret_u64_f32(acc1)));
      svfloat32_t out1 = svreinterpret_f32_u64(svuzp2_u64(svreinterpret_u64_f32(acc0), svreinterpret_u64_f32(acc1)));
      UPDATE_C(pgtrue, ptr_c0, tmp0, out0);
      UPDATE_C(pgtrue, ptr_c1, tmp1, out1);
      ptr_c0 += num_accumulators;
      ptr_c1 += num_accumulators;
    }
    for (; m_step < m2; m_step += 2) {
      float32x4_t acc = {0, 0, 0, 0};
      ptr_a0 = ptr_a;
      ptr_b0 = ptr_b;
      ptr_a += 2 * pad_k;
      for (BLASLONG p = 0; p < pad_k; p += 4) {
        bfloat16x8_t ma0 = vld1q_bf16(ptr_a0);
        bfloat16x8_t mb0 = vld1q_bf16(ptr_b0);
        acc = vbfmmlaq_f32(acc, mb0, ma0);
        ptr_a0 += 8;
        ptr_b0 += 8;
      }
      UPDATE_C2(ptr_c0, tmp4, vget_low_f32(acc));
      UPDATE_C2(ptr_c1, tmp5, vget_high_f32(acc));
      ptr_c0 += 2;
      ptr_c1 += 2;
    }
    // Final row is always a contiguous single row
    if (m & 1) {
      ptr_a0 = ptr_a;
      ptr_b0 = ptr_b;
      float32x4_t acc = {0, 0, 0, 0};
      for (BLASLONG p = 0; p < pad_k; p += 4) {
        // Same A value can be used for both B values
        bfloat16x8_t ma0 = vreinterpretq_bf16_u64(vdupq_n_u64(*((uint64_t *)ptr_a0)));
        bfloat16x8_t mb0 = vld1q_bf16(ptr_b0);
        acc = vbfmmlaq_f32(acc, mb0, ma0);
        ptr_a0 += 4;
        ptr_b0 += 8;
      }
      UPDATE_C1(ptr_c0, acc[0]);
      UPDATE_C1(ptr_c1, acc[2]);
    }
    ptr_b += 2 * pad_k;
  }
  if (n & 1) {
    ptr_a = (bfloat16_t *)AA;
    ptr_c0 = ptr_c;
    int m_step = 0;
    for (; m_step < m_acc; m_step += num_accumulators) {
      ptr_a0 = ptr_a;
      ptr_b0 = ptr_b;
      ptr_a += num_accumulators * pad_k;
      svfloat32_t acc0 = svdup_f32(0);
      svfloat32_t acc1 = svdup_f32(0);
      // Load entire 2VL block
      for (BLASLONG p = 0; p < pad_k; p += 4) {
        uint64_t *ptr_b0_u64 = (uint64_t *)ptr_b0;
        svbfloat16_t ma0 = svld1_bf16(pgtrue, ptr_a0);
        svbfloat16_t ma1 = svld1_bf16(pgtrue, ptr_a0 + sve_size_bf16);
        svbfloat16_t mb0 = svreinterpret_bf16_u64(svdup_u64(*ptr_b0_u64));
        acc0 = svbfmmla_f32(acc0, mb0, ma0);
        acc1 = svbfmmla_f32(acc1, mb0, ma1);
        ptr_a0 += sve_size_bf16 * 2;
        ptr_b0 += 4;
      }
      svfloat32_t out0 = svreinterpret_f32_u64(svuzp1_u64(svreinterpret_u64_f32(acc0), svreinterpret_u64_f32(acc1)));
      UPDATE_C(pgtrue, ptr_c0, tmp0, out0);
      ptr_c0 += num_accumulators;
    }
    for (; m_step < m2; m_step += 2) {
      float32x4_t acc = {0, 0, 0, 0};
      ptr_a0 = ptr_a;
      ptr_b0 = ptr_b;
      ptr_a += 2 * pad_k;
      for (BLASLONG p = 0; p < pad_k; p += 4) {
        bfloat16x8_t ma0 = vld1q_bf16(ptr_a0);
        bfloat16x8_t mb0 = vcombine_bf16(vld1_bf16(ptr_b0), vdup_n_bf16(vcvth_bf16_f32(0.0f)));
        acc = vbfmmlaq_f32(acc, mb0, ma0);
        ptr_a0 += 8;
        ptr_b0 += 4;
      }
      UPDATE_C2(ptr_c0, tmp4, vget_low_f32(acc));
      ptr_c0 += 2;
    }
    if (m & 1) {
      ptr_a0 = ptr_a;
      ptr_b0 = ptr_b;
      float32x2_t acc = {0, 0};
      for (BLASLONG p = 0; p < pad_k; p += 4) {
        bfloat16x4_t ma0 = vld1_bf16(ptr_a0);
        bfloat16x4_t mb0 = vld1_bf16(ptr_b0);
        acc = vbfdot_f32(acc, ma0, mb0);
        ptr_a0 += 4;
        ptr_b0 += 4;
      }
      UPDATE_C1(ptr_c0, acc[0] + acc[1]);
    }
  }
  return 0;
}
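To make the accumulator shuffles above easier to follow: BFMMLA multiplies a 2x4 bf16 matrix by the transpose of another 2x4 bf16 matrix and accumulates a 2x2 f32 tile per 128-bit segment, which is why the kernel pairs lanes as 64-bit units and uzp-s them before updating C. A tiny standalone NEON illustration (mine, not part of the patch; build with a bf16-capable toolchain, e.g. -march=armv8.6-a+bf16):

#include <arm_neon.h>
#include <stdio.h>

/* Round 8 floats to bf16 and pack them into one vector. */
static bfloat16x8_t to_bf16(const float *v) {
  bfloat16_t tmp[8];
  for (int i = 0; i < 8; i++) tmp[i] = vcvth_bf16_f32(v[i]);
  return vld1q_bf16(tmp);
}

int main(void) {
  /* a holds rows a0 = {1,2,3,4} and a1 = {5,6,7,8} (4 K-elements each);
     b holds rows b0 = e0 and b1 = e1, so the dot products just select. */
  float a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
  float b[8] = {1, 0, 0, 0, 0, 1, 0, 0};
  float32x4_t acc = vdupq_n_f32(0.0f);
  /* acc += A(2x4) * B(2x4)^T; lanes are [a0.b0, a0.b1, a1.b0, a1.b1]. */
  acc = vbfmmlaq_f32(acc, to_bf16(a), to_bf16(b));
  printf("%g %g %g %g\n", acc[0], acc[1], acc[2], acc[3]); /* 1 2 5 6 */
  return 0;
}

The 2x2 tiles are what force the deinterleave in the kernel: consecutive f32 lanes belong to two different rows of C, so whole rows only emerge after the 64-bit uzp1/uzp2 pass.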
@@ -1,430 +0,0 @@
/***************************************************************************
 * Copyright (c) 2025, The OpenBLAS Project
 * All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of the OpenBLAS project nor the names of
 *    its contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * *****************************************************************************/

#include <arm_sve.h>
#include <arm_neon.h>

#include "common.h"

#define INIT_C(M, N) mc##M##N = svdup_f32(0);
#define MATMUL(M, N) mc##M##N = svbfmmla(mc##M##N, ma##M, mb##N);
#define INIT_C_4x4 \
  do { \
    INIT_C(0, 0); \
    INIT_C(0, 1); \
    INIT_C(1, 0); \
    INIT_C(1, 1); \
  } while (0);
#ifdef ALPHA_ONE
#define UPDATE_C(PG16, PG32, PTR, TMP32, TMP16, SRC32) \
  do { \
    TMP32 = svreinterpret_f32_u32(svld1uh_u32((PG16), (uint16_t *)PTR)); \
    TMP32 = svadd_z((PG32), SRC32, TMP32); \
    TMP16 = svcvt_bf16_f32_z((PG32), TMP32); \
    TMP16 = svuzp1_bf16(TMP16, TMP16); \
    svst1_bf16((PG16), (PTR), TMP16); \
  } while (0)
#else
#define UPDATE_C(PG16, PG32, PTR, TMP32, TMP16, SRC32) \
  do { \
    TMP32 = svreinterpret_f32_u32(svld1uh_u32((PG16), (uint16_t *)PTR)); \
    TMP32 = svmad_z((PG32), svalpha, SRC32, TMP32); \
    TMP16 = svcvt_bf16_f32_z((PG32), TMP32); \
    TMP16 = svuzp1_bf16(TMP16, TMP16); \
    svst1_bf16((PG16), (PTR), TMP16); \
  } while (0)
#endif
#define ZIP_EVEN_ELEMENTS(PG, mc0, mc1, tmp, vc) \
  do { \
    (tmp) = svuzp1_f32((mc0), (mc1)); \
    (vc) = svcompact_f32((PG), (tmp)); \
  } while (0)
#define ZIP_ODD_ELEMENTS(PG, mc0, mc1, tmp, vc) \
  do { \
    (tmp) = svuzp2_f32((mc0), (mc1)); \
    (vc) = svcompact_f32((PG), (tmp)); \
  } while (0)
#define ACCUMULATE_LAST4_TO_FIRST4(M, N, TMP) \
  do { \
    TMP = svext_f32(mc##M##N, mc##M##N, 4); \
    mc##M##N = svadd_f32_z(svptrue_b32(), mc##M##N, (TMP)); \
  } while (0)

#ifdef ALPHA_ONE
int bgemm_kernel_neoversev1_alpha_one(BLASLONG m, BLASLONG n, BLASLONG k,
                                      FLOAT alpha, IFLOAT *A, IFLOAT *B,
                                      FLOAT *C, BLASLONG ldc)
#else
int bgemm_kernel_neoversev1_alpha(BLASLONG m, BLASLONG n, BLASLONG k,
                                  FLOAT alpha, IFLOAT *A, IFLOAT *B, FLOAT *C,
                                  BLASLONG ldc)
#endif
{
  BLASLONG pad_k = (k + 7) & ~7;
  svbfloat16_t ma0, ma1, mb0, mb1;
  svfloat32_t mc00, mc01, mc10, mc11, vc0, vc1, vc2, vc3;
  svfloat32_t tmp;
#ifndef ALPHA_ONE
  bfloat16_t alpha_bf16;
  memcpy(&alpha_bf16, &alpha, sizeof(bfloat16_t));
  svfloat32_t svalpha = svdup_f32(vcvtah_f32_bf16(alpha_bf16));
#endif
  svbool_t pg16_all = svptrue_b16();
  svbool_t pg32_first_1 = svwhilelt_b32(0, 1);
  svbool_t pg32_first_2 = svwhilelt_b32(0, 2);
  svbool_t pg32_first_4 = svwhilelt_b32(0, 4);
  svbool_t pg16_first_1 = svwhilelt_b16(0, 1);
  svbool_t pg16_first_2 = svwhilelt_b16(0, 2);
  svbool_t pg16_first_4 = svwhilelt_b16(0, 4);
  svbool_t pg32_select_first_2_per_quadword = svdupq_b32(1, 1, 0, 0);
  bfloat16_t *ptr_a = (bfloat16_t *)A;
  bfloat16_t *ptr_b = (bfloat16_t *)B;
  bfloat16_t *ptr_c = (bfloat16_t *)C;
  bfloat16_t *ptr_a0;
  bfloat16_t *ptr_b0;
  bfloat16_t *ptr_c0, *ptr_c1, *ptr_c2, *ptr_c3;
  svfloat32_t tmp32;
  svbfloat16_t tmp16;
  for (BLASLONG j = 0; j < n / 4; j++) {
    ptr_c0 = ptr_c;
    ptr_c1 = ptr_c0 + ldc;
    ptr_c2 = ptr_c1 + ldc;
    ptr_c3 = ptr_c2 + ldc;
    ptr_c += 4 * ldc;
    ptr_a = (bfloat16_t *)A;
    for (BLASLONG i = 0; i < m / 4; i++) {
      ptr_a0 = ptr_a;
      ptr_a += 4 * pad_k;
      ptr_b0 = ptr_b;
      INIT_C_4x4;
      for (BLASLONG p = 0; p < pad_k; p += 8) {
        ma0 = svld1_bf16(pg16_all, ptr_a0);
        ma1 = svld1_bf16(pg16_all, ptr_a0 + 16);
        mb0 = svld1_bf16(pg16_all, ptr_b0);
        mb1 = svld1_bf16(pg16_all, ptr_b0 + 16);
        MATMUL(0, 0);
        MATMUL(0, 1);
        MATMUL(1, 0);
        MATMUL(1, 1);
        ptr_a0 += 32;
        ptr_b0 += 32;
      }
      ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);
      ACCUMULATE_LAST4_TO_FIRST4(0, 1, tmp);
      ACCUMULATE_LAST4_TO_FIRST4(1, 0, tmp);
      ACCUMULATE_LAST4_TO_FIRST4(1, 1, tmp);
      ZIP_EVEN_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc0);
      ZIP_ODD_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc1);
      ZIP_EVEN_ELEMENTS(pg32_select_first_2_per_quadword, mc01, mc11, tmp, vc2);
      ZIP_ODD_ELEMENTS(pg32_select_first_2_per_quadword, mc01, mc11, tmp, vc3);
      UPDATE_C(pg16_first_4, pg32_first_4, ptr_c0, tmp32, tmp16, vc0);
      UPDATE_C(pg16_first_4, pg32_first_4, ptr_c1, tmp32, tmp16, vc1);
      UPDATE_C(pg16_first_4, pg32_first_4, ptr_c2, tmp32, tmp16, vc2);
      UPDATE_C(pg16_first_4, pg32_first_4, ptr_c3, tmp32, tmp16, vc3);
      ptr_c0 += 4;
      ptr_c1 += 4;
      ptr_c2 += 4;
      ptr_c3 += 4;
    }
    if (m & 2) {
      ptr_a0 = ptr_a;
      ptr_a += 2 * pad_k;
      ptr_b0 = ptr_b;
      INIT_C(0, 0);
      INIT_C(0, 1);
      for (BLASLONG p = 0; p < pad_k; p += 8) {
        ma0 = svld1_bf16(pg16_all, ptr_a0);
        mb0 = svld1_bf16(pg16_all, ptr_b0);
        mb1 = svld1_bf16(pg16_all, ptr_b0 + 16);
        MATMUL(0, 0);
        MATMUL(0, 1);
        ptr_a0 += 16;
        ptr_b0 += 32;
      }
      ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);
      ACCUMULATE_LAST4_TO_FIRST4(0, 1, tmp);
      vc0 = svuzp1(mc00, mc00);
      vc1 = svuzp2(mc00, mc00);
      vc2 = svuzp1(mc01, mc01);
      vc3 = svuzp2(mc01, mc01);
      UPDATE_C(pg16_first_2, pg32_first_2, ptr_c0, tmp32, tmp16, vc0);
      UPDATE_C(pg16_first_2, pg32_first_2, ptr_c1, tmp32, tmp16, vc1);
      UPDATE_C(pg16_first_2, pg32_first_2, ptr_c2, tmp32, tmp16, vc2);
      UPDATE_C(pg16_first_2, pg32_first_2, ptr_c3, tmp32, tmp16, vc3);
      ptr_c0 += 2;
      ptr_c1 += 2;
      ptr_c2 += 2;
      ptr_c3 += 2;
    }
    if (m & 1) {
      ptr_a0 = ptr_a;
      ptr_b0 = ptr_b;
      INIT_C(0, 0);
      INIT_C(0, 1);
      for (BLASLONG p = 0; p < pad_k; p += 8) {
        ma0 = svld1_bf16(pg16_all, ptr_a0);
        mb0 = svld1_bf16(pg16_all, ptr_b0);
        mb1 = svld1_bf16(pg16_all, ptr_b0 + 16);
        MATMUL(0, 0);
        MATMUL(0, 1);
        ptr_a0 += 16;
        ptr_b0 += 32;
      }
      ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);
      ACCUMULATE_LAST4_TO_FIRST4(0, 1, tmp);
      // using svcompact would be more straightforward
      vc1 = svuzp2(mc00, mc00);
      vc3 = svuzp2(mc01, mc01);
      UPDATE_C(pg16_first_1, pg32_first_1, ptr_c0, tmp32, tmp16, mc00);
      UPDATE_C(pg16_first_1, pg32_first_1, ptr_c1, tmp32, tmp16, vc1);
      UPDATE_C(pg16_first_1, pg32_first_1, ptr_c2, tmp32, tmp16, mc01);
      UPDATE_C(pg16_first_1, pg32_first_1, ptr_c3, tmp32, tmp16, vc3);
    }
    ptr_b += 4 * pad_k;
  }
  if (n & 2) {
    ptr_c0 = ptr_c;
    ptr_c1 = ptr_c0 + ldc;
    ptr_c += 2 * ldc;
    ptr_a = (bfloat16_t *)A;
    for (BLASLONG i = 0; i < m / 4; i++) {
      ptr_a0 = ptr_a;
      ptr_a += 4 * pad_k;
      ptr_b0 = ptr_b;
      INIT_C(0, 0);
      INIT_C(1, 0);
      for (BLASLONG p = 0; p < pad_k; p += 8) {
        ma0 = svld1_bf16(pg16_all, ptr_a0);
        ma1 = svld1_bf16(pg16_all, ptr_a0 + 16);
        mb0 = svld1_bf16(pg16_all, ptr_b0);
        MATMUL(0, 0);
        MATMUL(1, 0);
        ptr_a0 += 32;
        ptr_b0 += 16;
      }
      ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);
      ACCUMULATE_LAST4_TO_FIRST4(1, 0, tmp);
      ZIP_EVEN_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc0);
      ZIP_ODD_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc2);
      UPDATE_C(pg16_first_4, pg32_first_4, ptr_c0, tmp32, tmp16, vc0);
      UPDATE_C(pg16_first_4, pg32_first_4, ptr_c1, tmp32, tmp16, vc2);
      ptr_c0 += 4;
      ptr_c1 += 4;
    }
    if (m & 2) {
      ptr_a0 = ptr_a;
      ptr_a += 2 * pad_k;
      ptr_b0 = ptr_b;
      INIT_C(0, 0);
      for (BLASLONG p = 0; p < pad_k; p += 8) {
        ma0 = svld1_bf16(pg16_all, ptr_a0);
        mb0 = svld1_bf16(pg16_all, ptr_b0);
        MATMUL(0, 0);
        ptr_a0 += 16;
        ptr_b0 += 16;
      }
      ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);
      vc0 = svuzp1(mc00, mc00);
      vc1 = svuzp2(mc00, mc00);
      UPDATE_C(pg16_first_2, pg32_first_2, ptr_c0, tmp32, tmp16, vc0);
      UPDATE_C(pg16_first_2, pg32_first_2, ptr_c1, tmp32, tmp16, vc1);
      ptr_c0 += 2;
      ptr_c1 += 2;
    }
    if (m & 1) {
      ptr_a0 = ptr_a;
      ptr_b0 = ptr_b;
      INIT_C(0, 0);
      for (BLASLONG p = 0; p < pad_k; p += 8) {
        ma0 = svld1_bf16(pg16_all, ptr_a0);
        mb0 = svld1_bf16(pg16_all, ptr_b0);
        MATMUL(0, 0);
        ptr_a0 += 16;
        ptr_b0 += 16;
      }
      ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);
      vc1 = svuzp2(mc00, mc00);
      UPDATE_C(pg16_first_1, pg32_first_1, ptr_c0, tmp32, tmp16, mc00);
      UPDATE_C(pg16_first_1, pg32_first_1, ptr_c1, tmp32, tmp16, vc1);
    }
    ptr_b += 2 * pad_k;
  }
  if (n & 1) { // TODO: this case looks like pure overhead; find out whether
               // it actually occurs in our case.
    ptr_c0 = ptr_c;
    ptr_a = (bfloat16_t *)A;
    for (BLASLONG i = 0; i < m / 4; i++) {
      ptr_a0 = ptr_a;
      ptr_a += 4 * pad_k;
      ptr_b0 = ptr_b;
      INIT_C(0, 0);
      INIT_C(1, 0);
      for (BLASLONG p = 0; p < pad_k; p += 8) {
        ma0 = svld1_bf16(pg16_all, ptr_a0);
        ma1 = svld1_bf16(pg16_all, ptr_a0 + 16);
        mb0 = svld1_bf16(pg16_all, ptr_b0);
        MATMUL(0, 0);
        MATMUL(1, 0);
        ptr_a0 += 32;
        ptr_b0 += 16;
      }
      ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);
      ACCUMULATE_LAST4_TO_FIRST4(1, 0, tmp);
      ZIP_EVEN_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc0);
      UPDATE_C(pg16_first_4, pg32_first_4, ptr_c0, tmp32, tmp16, vc0);
      ptr_c0 += 4;
    }
    if (m & 2) {
      ptr_a0 = ptr_a;
      ptr_a += 2 * pad_k;
      ptr_b0 = ptr_b;
      INIT_C(0, 0);
      for (BLASLONG p = 0; p < pad_k; p += 8) {
        ma0 = svld1_bf16(pg16_all, ptr_a0);
        mb0 = svld1_bf16(pg16_all, ptr_b0);
        MATMUL(0, 0);
        ptr_a0 += 16;
        ptr_b0 += 16;
      }
      ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);
      vc0 = svuzp1(mc00, mc00);
      UPDATE_C(pg16_first_2, pg32_first_2, ptr_c0, tmp32, tmp16, vc0);
      ptr_c0 += 2;
    }
    if (m & 1) {
      ptr_a0 = ptr_a;
      ptr_b0 = ptr_b;
      INIT_C(0, 0);
      for (BLASLONG p = 0; p < pad_k; p += 8) {
        ma0 = svld1_bf16(pg16_all, ptr_a0);
        mb0 = svld1_bf16(pg16_all, ptr_b0);
        MATMUL(0, 0);
        ptr_a0 += 16;
        ptr_b0 += 16;
      }
      ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp);
      UPDATE_C(pg16_first_1, pg32_first_1, ptr_c0, tmp32, tmp16, mc00);
    }
  }
  return 0;
}
@@ -0,0 +1,135 @@
/***************************************************************************
 * Copyright (c) 2025, The OpenBLAS Project
 * All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of the OpenBLAS project nor the names of
 *    its contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * *****************************************************************************/

#include <arm_sve.h>
#include <arm_neon.h>

#include "common.h"

int CNAME(BLASLONG n, BLASLONG m, IFLOAT *input, BLASLONG lda, IFLOAT *output) {
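  /* Packs panels of (VL/2) rows for the 2VLx4 kernel (this appears to be
     bgemm_ncopy_2vl_neoversev1.c, per the Makefile): for every group of 4
     columns, each row of the block contributes 4 consecutive elements, and
     the column tail is zero-padded up to the kernel's unroll of 4. */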
  const int sve_size_bf16 = svcnth();
  const int num_accumulators = sve_size_bf16 >> 1;
  const int m_sve_accumulators = m & -num_accumulators;
  const int n4 = n & -4;
  const int n_rest = n - n4;
  const int m2 = m & -2;
  const int m_rest = m - m2;
  size_t m_step = 0;
  for (; m_step < m_sve_accumulators; m_step += num_accumulators) {
    const uint16_t *inner_input = input;
    // Potential for vld1q here with transpose
    for (int n_step = 0; n_step < n4; n_step += 4) {
      for (int line = 0; line < num_accumulators; line += 4) {
        uint16x4_t a_vec0 = vld1_u16(inner_input + line * lda);
        uint16x4_t a_vec1 = vld1_u16(inner_input + (line + 1) * lda);
        uint16x4_t a_vec2 = vld1_u16(inner_input + (line + 2) * lda);
        uint16x4_t a_vec3 = vld1_u16(inner_input + (line + 3) * lda);
        vst1_u16(output, a_vec0);
        vst1_u16(output + 4, a_vec1);
        vst1_u16(output + 8, a_vec2);
        vst1_u16(output + 12, a_vec3);
        output += 16;
      }
      inner_input += 4;
    }
    // Zero-pad any K remainder up to 4; by the time we get here the
    // memory bandwidth should already be saturated
    if (n_rest) {
      for (BLASLONG line = 0; line < num_accumulators; line++) {
        output[0] = inner_input[0];
        output[1] = n_rest == 1 ? 0 : inner_input[1];
        output[2] = n_rest <= 2 ? 0 : inner_input[2];
        output[3] = n_rest <= 3 ? 0 : inner_input[3];
        inner_input += lda;
        output += 4;
      }
    }
    input += lda * num_accumulators;
  }
  // Any remaining blocks are done 2 at a time for ASIMD processing
  for (; m_step < m2; m_step += 2) {
    const uint16_t *inner_input = input;
    for (size_t n_step = 0; n_step < n4; n_step += 4) {
      uint16x4_t a_vec0 = vld1_u16(inner_input);
      uint16x4_t a_vec1 = vld1_u16(inner_input + lda);
      vst1_u16(output, a_vec0);
      vst1_u16(output + 4, a_vec1);
      inner_input += 4;
      output += 8;
    }
    if (n_rest) {
      for (BLASLONG line = 0; line < 2; line++) {
        output[0] = inner_input[0];
        output[1] = n_rest == 1 ? 0 : inner_input[1];
        output[2] = n_rest <= 2 ? 0 : inner_input[2];
        output[3] = n_rest <= 3 ? 0 : inner_input[3];
        inner_input += lda;
        output += 4;
      }
    }
    input += lda * 2;
  }
  // The final odd row is copied through as-is
  if (m_rest & 1) {
    for (size_t n_step = 0; n_step < n4; n_step += 4) {
      uint16x4_t a_vec0 = vld1_u16(input);
      vst1_u16(output, a_vec0);
      input += 4;
      output += 4;
    }
    if (n_rest) {
      output[0] = input[0];
      output[1] = n_rest == 1 ? 0 : input[1];
      output[2] = n_rest <= 2 ? 0 : input[2];
      output[3] = n_rest <= 3 ? 0 : input[3];
    }
  }
  return 0;
}
@@ -0,0 +1,124 @@
/***************************************************************************
 * Copyright (c) 2025, The OpenBLAS Project
 * All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of the OpenBLAS project nor the names of
 *    its contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * *****************************************************************************/

#include <arm_sve.h>
#include <arm_neon.h>

#include "common.h"

int CNAME(BLASLONG n, BLASLONG m, IFLOAT *input, BLASLONG lda, IFLOAT *output) {
  const int num_accumulators = 4;
  const int m_accumulators = m & -4;
  const int n4 = n & -4;
  const int n_rest = n - n4;
  const int m_rest = m - m_accumulators;
  for (size_t m_step = 0; m_step < m_accumulators; m_step += num_accumulators) {
    const uint16_t *inner_input = input;
    // Potential for vld1q here with transpose
    for (size_t n_step = 0; n_step < n4; n_step += 4) {
      uint16x4_t a_vec0 = vld1_u16(inner_input + 0 * lda);
      uint16x4_t a_vec1 = vld1_u16(inner_input + 1 * lda);
      uint16x4_t a_vec2 = vld1_u16(inner_input + 2 * lda);
      uint16x4_t a_vec3 = vld1_u16(inner_input + 3 * lda);
      vst1_u16(output, a_vec0);
      vst1_u16(output + 4, a_vec1);
      vst1_u16(output + 8, a_vec2);
      vst1_u16(output + 12, a_vec3);
      output += 16;
      inner_input += 4;
    }
    if (n_rest) {
      for (BLASLONG line = 0; line < num_accumulators; line++) {
        output[0] = inner_input[0];
        output[1] = n_rest == 1 ? 0 : inner_input[1];
        output[2] = n_rest <= 2 ? 0 : inner_input[2];
        output[3] = n_rest <= 3 ? 0 : inner_input[3];
        inner_input += lda;
        output += 4;
      }
    }
    input += lda * num_accumulators;
  }
  if (m_rest & 2) {
    const uint16_t *inner_input = input;
    for (size_t n_step = 0; n_step < n4; n_step += 4) {
      uint16x4_t a_vec0 = vld1_u16(inner_input);
      uint16x4_t a_vec1 = vld1_u16(inner_input + lda);
      vst1_u16(output, a_vec0);
      vst1_u16(output + 4, a_vec1);
      inner_input += 4;
      output += 8;
    }
    if (n_rest) {
      for (BLASLONG line = 0; line < 2; line++) {
        output[0] = inner_input[0];
        output[1] = n_rest == 1 ? 0 : inner_input[1];
        output[2] = n_rest <= 2 ? 0 : inner_input[2];
        output[3] = n_rest <= 3 ? 0 : inner_input[3];
        inner_input += lda;
        output += 4;
      }
    }
    input += lda * 2;
  }
  if (m_rest & 1) {
    for (size_t n_step = 0; n_step < n4; n_step += 4) {
      uint16x4_t a_vec0 = vld1_u16(input);
      vst1_u16(output, a_vec0);
      input += 4;
      output += 4;
    }
    if (n_rest) {
      output[0] = input[0];
      output[1] = n_rest == 1 ? 0 : input[1];
      output[2] = n_rest <= 2 ? 0 : input[2];
      output[3] = n_rest <= 3 ? 0 : input[3];
    }
  }
  return 0;
}
@@ -0,0 +1,154 @@
/***************************************************************************
 * Copyright (c) 2025, The OpenBLAS Project
 * All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of the OpenBLAS project nor the names of
 *    its contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * *****************************************************************************/

#include <arm_sve.h>
#include <arm_neon.h>

#include "common.h"

int CNAME(BLASLONG m, BLASLONG n, IFLOAT *input, BLASLONG lda, IFLOAT *output) {
  const int sve_size_bf16 = svcnth();
  const int num_accumulators_sve = sve_size_bf16 >> 1;
  const int num_accumulators = num_accumulators_sve;
  const int incr_accumulators = 4;
  const int n_sve_accumulators = (n & -num_accumulators);
  const int n2 = n & -2;
  const int n_rest = n - n2;
  const int m4 = m & -4;
  const int m_rest = m - m4;
  size_t n_step = 0;
  for (; n_step < n_sve_accumulators; n_step += num_accumulators) {
    const uint16_t *inner_input = input;
    // Full 4x4 item transposes down the M dimension
    for (size_t m_step = 0; m_step < m4; m_step += 4) {
      const uint16_t *tile = inner_input;
      for (size_t line = 0; line < num_accumulators; line += incr_accumulators) {
        // Load 4x4 block
        uint16x4_t a_vec0 = vld1_u16(tile);
        uint16x4_t a_vec1 = vld1_u16(tile + lda);
        uint16x4_t a_vec2 = vld1_u16(tile + 2 * lda);
        uint16x4_t a_vec3 = vld1_u16(tile + 3 * lda);
        // Transpose 4x4 blocks
        uint16x4_t out_vec0 = vzip1_u16(a_vec0, a_vec1);
        uint16x4_t out_vec1 = vzip2_u16(a_vec0, a_vec1);
        uint16x4_t out_vec2 = vzip1_u16(a_vec2, a_vec3);
        uint16x4_t out_vec3 = vzip2_u16(a_vec2, a_vec3);
        // Transpose 8x4 blocks
        a_vec0 = vreinterpret_u16_u32(vzip1_u32(vreinterpret_u32_u16(out_vec0), vreinterpret_u32_u16(out_vec2)));
        a_vec1 = vreinterpret_u16_u32(vzip2_u32(vreinterpret_u32_u16(out_vec0), vreinterpret_u32_u16(out_vec2)));
        a_vec2 = vreinterpret_u16_u32(vzip1_u32(vreinterpret_u32_u16(out_vec1), vreinterpret_u32_u16(out_vec3)));
        a_vec3 = vreinterpret_u16_u32(vzip2_u32(vreinterpret_u32_u16(out_vec1), vreinterpret_u32_u16(out_vec3)));
        vst1_u16(output, a_vec0);
        vst1_u16(output + 4, a_vec1);
        vst1_u16(output + 8, a_vec2);
        vst1_u16(output + 12, a_vec3);
        tile += incr_accumulators;
        output += 16;
      }
      inner_input += incr_accumulators * lda;
    }
    if (m_rest) {
      for (BLASLONG line = 0; line < num_accumulators; line++) {
        output[0] = inner_input[0];
        output[1] = m_rest == 1 ? 0 : *(inner_input + lda);
        output[2] = m_rest <= 2 ? 0 : *(inner_input + 2 * lda);
        output[3] = m_rest <= 3 ? 0 : *(inner_input + 3 * lda);
        inner_input++;
        output += 4;
      }
    }
    input += num_accumulators;
  }
  for (; n_step < n2; n_step += 2) {
    const uint16_t *inner_input = input;
    for (size_t m_step = 0; m_step < m4; m_step += 4) {
      for (BLASLONG line = 0; line < 2; line++) {
        output[0] = *(inner_input + line);
        output[1] = *(inner_input + line + lda);
        output[2] = *(inner_input + line + 2 * lda);
        output[3] = *(inner_input + line + 3 * lda);
        output += 4;
      }
      inner_input += 4 * lda;
    }
    if (m_rest) {
      for (BLASLONG line = 0; line < 2; line++) {
        output[0] = *(inner_input + line);
        output[1] = m_rest == 1 ? 0 : *(inner_input + line + lda);
        output[2] = m_rest <= 2 ? 0 : *(inner_input + line + 2 * lda);
        output[3] = m_rest <= 3 ? 0 : *(inner_input + line + 3 * lda);
        output += 4;
      }
    }
    input += 2;
  }
  if (n_rest & 1) {
    const uint16_t *inner_input = input;
    for (size_t m_step = 0; m_step < m4; m_step += 4) {
      output[0] = *inner_input;
      output[1] = *(inner_input + lda);
      output[2] = *(inner_input + 2 * lda);
      output[3] = *(inner_input + 3 * lda);
      inner_input += 4 * lda;
      output += 4;
    }
    if (m_rest) {
      output[0] = inner_input[0];
      output[1] = m_rest == 1 ? 0 : *(inner_input + lda);
      output[2] = m_rest <= 2 ? 0 : *(inner_input + 2 * lda);
      output[3] = m_rest <= 3 ? 0 : *(inner_input + 3 * lda);
      output += 4;
    }
  }
  return 0;
}
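Both tcopy kernels rely on the same two-stage zip trick to transpose a 4x4 block of 16-bit elements entirely in registers: first zip adjacent rows at 16-bit granularity, then zip the results at 32-bit granularity. A small standalone illustration (mine, not part of the patch):

#include <arm_neon.h>
#include <stdio.h>

int main(void) {
  /* 4x4 input block, row-major: row r holds {10r, 10r+1, 10r+2, 10r+3}. */
  uint16_t in[16] = { 0,  1,  2,  3, 10, 11, 12, 13,
                     20, 21, 22, 23, 30, 31, 32, 33};
  uint16x4_t r0 = vld1_u16(in),     r1 = vld1_u16(in + 4);
  uint16x4_t r2 = vld1_u16(in + 8), r3 = vld1_u16(in + 12);

  /* Stage 1: zip 16-bit lanes of row pairs -> transposed 2x2 sub-blocks. */
  uint16x4_t t0 = vzip1_u16(r0, r1), t1 = vzip2_u16(r0, r1);
  uint16x4_t t2 = vzip1_u16(r2, r3), t3 = vzip2_u16(r2, r3);

  /* Stage 2: zip 32-bit lanes to stitch the sub-blocks into full columns. */
  uint16x4_t c0 = vreinterpret_u16_u32(vzip1_u32(vreinterpret_u32_u16(t0), vreinterpret_u32_u16(t2)));
  uint16x4_t c1 = vreinterpret_u16_u32(vzip2_u32(vreinterpret_u32_u16(t0), vreinterpret_u32_u16(t2)));
  uint16x4_t c2 = vreinterpret_u16_u32(vzip1_u32(vreinterpret_u32_u16(t1), vreinterpret_u32_u16(t3)));
  uint16x4_t c3 = vreinterpret_u16_u32(vzip2_u32(vreinterpret_u32_u16(t1), vreinterpret_u32_u16(t3)));

  uint16_t out[16];
  vst1_u16(out, c0);     vst1_u16(out + 4, c1);
  vst1_u16(out + 8, c2); vst1_u16(out + 12, c3);
  for (int i = 0; i < 16; i++)
    printf("%2d%c", out[i], (i % 4 == 3) ? '\n' : ' ');
  /* Prints the transpose: {0,10,20,30}, {1,11,21,31}, ... */
  return 0;
}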
@@ -0,0 +1,143 @@
/***************************************************************************
 * Copyright (c) 2025, The OpenBLAS Project
 * All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of the OpenBLAS project nor the names of
 *    its contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * *****************************************************************************/

#include <arm_sve.h>
#include <arm_neon.h>

#include "common.h"

int CNAME(BLASLONG m, BLASLONG n, IFLOAT *input, BLASLONG lda, IFLOAT *output) {
  const int num_accumulators = 4;
  const int n_accumulators = (n & -num_accumulators);
  const int n_rest = n - n_accumulators;
  const int m4 = m & -4;
  const int m_rest = m - m4;
  for (size_t n_step = 0; n_step < n_accumulators; n_step += num_accumulators) {
    const uint16_t *inner_input = input;
    // Full 4x4 item transposes down the M dimension
    for (size_t m_step = 0; m_step < m4; m_step += 4) {
      // Load 4x4 block
      uint16x4_t a_vec0 = vld1_u16(inner_input);
      uint16x4_t a_vec1 = vld1_u16(inner_input + lda);
      uint16x4_t a_vec2 = vld1_u16(inner_input + 2 * lda);
      uint16x4_t a_vec3 = vld1_u16(inner_input + 3 * lda);
      // Transpose 4x4 blocks
      uint16x4_t out_vec0 = vzip1_u16(a_vec0, a_vec1);
      uint16x4_t out_vec1 = vzip2_u16(a_vec0, a_vec1);
      uint16x4_t out_vec2 = vzip1_u16(a_vec2, a_vec3);
      uint16x4_t out_vec3 = vzip2_u16(a_vec2, a_vec3);
      // Transpose 8x4 blocks
      a_vec0 = vreinterpret_u16_u32(vzip1_u32(vreinterpret_u32_u16(out_vec0), vreinterpret_u32_u16(out_vec2)));
      a_vec1 = vreinterpret_u16_u32(vzip2_u32(vreinterpret_u32_u16(out_vec0), vreinterpret_u32_u16(out_vec2)));
      a_vec2 = vreinterpret_u16_u32(vzip1_u32(vreinterpret_u32_u16(out_vec1), vreinterpret_u32_u16(out_vec3)));
      a_vec3 = vreinterpret_u16_u32(vzip2_u32(vreinterpret_u32_u16(out_vec1), vreinterpret_u32_u16(out_vec3)));
      vst1_u16(output, a_vec0);
      vst1_u16(output + 4, a_vec1);
      vst1_u16(output + 8, a_vec2);
      vst1_u16(output + 12, a_vec3);
      inner_input += 4 * lda;
      output += 16;
    }
    if (m_rest) {
      for (BLASLONG line = 0; line < num_accumulators; line++) {
        output[0] = inner_input[0];
        output[1] = m_rest == 1 ? 0 : *(inner_input + lda);
        output[2] = m_rest <= 2 ? 0 : *(inner_input + 2 * lda);
        output[3] = m_rest <= 3 ? 0 : *(inner_input + 3 * lda);
        inner_input++;
        output += 4;
      }
    }
    input += num_accumulators;
  }
  // Handle the two remaining rows as pairs
  if (n_rest & 2) {
    const uint16_t *inner_input = input;
    for (size_t m_step = 0; m_step < m4; m_step += 4) {
      for (BLASLONG line = 0; line < 2; line++) {
        output[0] = *(inner_input + line);
        output[1] = *(inner_input + line + lda);
        output[2] = *(inner_input + line + 2 * lda);
        output[3] = *(inner_input + line + 3 * lda);
        output += 4;
      }
      inner_input += 4 * lda;
    }
    if (m_rest) {
      for (BLASLONG line = 0; line < 2; line++) {
        output[0] = *(inner_input + line);
        output[1] = m_rest == 1 ? 0 : *(inner_input + line + lda);
        output[2] = m_rest <= 2 ? 0 : *(inner_input + line + 2 * lda);
        output[3] = m_rest <= 3 ? 0 : *(inner_input + line + 3 * lda);
        output += 4;
      }
    }
    input += 2;
  }
  // Flatten final row
  if (n_rest & 1) {
    const uint16_t *inner_input = input;
    for (size_t m_step = 0; m_step < m4; m_step += 4) {
      output[0] = *inner_input;
      output[1] = *(inner_input + lda);
      output[2] = *(inner_input + 2 * lda);
      output[3] = *(inner_input + 3 * lda);
      inner_input += 4 * lda;
      output += 4;
    }
    if (m_rest) {
      output[0] = inner_input[0];
      output[1] = m_rest == 1 ? 0 : *(inner_input + lda);
      output[2] = m_rest <= 2 ? 0 : *(inner_input + 2 * lda);
      output[3] = m_rest <= 3 ? 0 : *(inner_input + 3 * lda);
      output += 4;
    }
  }
  return 0;
}
@@ -1,46 +0,0 @@
/***************************************************************************
 * Copyright (c) 2024-2025, The OpenBLAS Project
 * All rights reserved.
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of the OpenBLAS project nor the names of
 *    its contributors may be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 * *****************************************************************************/

#include <arm_sve.h>

#include "common.h"

#define ALPHA_ONE
#include "sbgemm_kernel_4x4_neoversev1_impl.c"
#undef ALPHA_ONE
#include "sbgemm_kernel_4x4_neoversev1_impl.c"

int CNAME(BLASLONG m, BLASLONG n, BLASLONG k, FLOAT alpha, IFLOAT *A, IFLOAT *B,
          FLOAT *C, BLASLONG ldc) {
  if (alpha == 1.0f)
    return sbgemm_kernel_neoversev1_alpha_one(m, n, k, alpha, A, B, C, ldc);
  else
    return sbgemm_kernel_neoversev1_alpha(m, n, k, alpha, A, B, C, ldc);
  return 0;
}
@@ -1,414 +0,0 @@ | |||||
/*************************************************************************** | |||||
* Copyright (c) 2024-2025, The OpenBLAS Project | |||||
* All rights reserved. | |||||
* Redistribution and use in source and binary forms, with or without | |||||
* modification, are permitted provided that the following conditions are | |||||
* met: | |||||
* 1. Redistributions of source code must retain the above copyright | |||||
* notice, this list of conditions and the following disclaimer. | |||||
* 2. Redistributions in binary form must reproduce the above copyright | |||||
* notice, this list of conditions and the following disclaimer in | |||||
* the documentation and/or other materials provided with the | |||||
* distribution. | |||||
* 3. Neither the name of the OpenBLAS project nor the names of | |||||
* its contributors may be used to endorse or promote products | |||||
* derived from this software without specific prior written permission. | |||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | |||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE | |||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |||||
* POSSIBILITY OF SUCH DAMAGE. | |||||
* *****************************************************************************/ | |||||
#include <arm_sve.h> | |||||
#include "common.h" | |||||
#define INIT_C(M, N) mc##M##N = svdup_f32(0) | |||||
#define MATMUL(M, N) mc##M##N = svbfmmla(mc##M##N, ma##M, mb##N) | |||||
#define INIT_C_4x4 \ | |||||
do { \ | |||||
INIT_C(0, 0); \ | |||||
INIT_C(0, 1); \ | |||||
INIT_C(1, 0); \ | |||||
INIT_C(1, 1); \ | |||||
} while (0) | |||||
#ifdef ALPHA_ONE | |||||
#define UPDATE_C(PG, PTR, DST, SRC) \ | |||||
do { \ | |||||
DST = svld1_f32((PG), (PTR)); \ | |||||
DST = svadd_z((PG), SRC, DST); \ | |||||
svst1_f32((PG), (PTR), DST); \ | |||||
} while (0) | |||||
#else | |||||
#define UPDATE_C(PG, PTR, DST, SRC) \ | |||||
do { \ | |||||
DST = svld1_f32((PG), (PTR)); \ | |||||
DST = svmad_z((PG), svalpha, SRC, DST); \ | |||||
svst1_f32((PG), (PTR), DST); \ | |||||
} while (0) | |||||
#endif | |||||
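/* The ZIP_* helpers below gather one column of the 4x4 C tile from a pair of
 * BFMMLA accumulators: svuzp1/svuzp2 select the even/odd lanes (the two
 * columns of each 2x2 sub-tile) and svcompact packs the lanes selected by PG
 * into contiguous positions. */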
#define ZIP_EVEN_ELEMENTS(PG, mc0, mc1, tmp, vc) \ | |||||
do { \ | |||||
(tmp) = svuzp1_f32((mc0), (mc1)); \ | |||||
(vc) = svcompact_f32((PG), (tmp)); \ | |||||
} while (0) | |||||
#define ZIP_ODD_ELEMENTS(PG, mc0, mc1, tmp, vc) \ | |||||
do { \ | |||||
(tmp) = svuzp2_f32((mc0), (mc1)); \ | |||||
(vc) = svcompact_f32((PG), (tmp)); \ | |||||
} while (0) | |||||
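/* ACCUMULATE_LAST4_TO_FIRST4 below merges the two 128-bit segments of an
 * accumulator: each segment holds partial sums for a different k-chunk, so
 * rotating the upper segment down by four lanes (svext) and adding folds
 * everything into the low half. */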
#define ACCUMULATE_LAST4_TO_FIRST4(M, N, TMP) \ | |||||
do { \ | |||||
TMP = svext_f32(mc##M##N, mc##M##N, 4); \ | |||||
mc##M##N = svadd_f32_z(svptrue_b32(), mc##M##N, (TMP)); \ | |||||
} while (0) | |||||
#ifdef ALPHA_ONE | |||||
int sbgemm_kernel_neoversev1_alpha_one(BLASLONG m, BLASLONG n, BLASLONG k, | |||||
FLOAT alpha, IFLOAT *A, IFLOAT *B, | |||||
FLOAT *C, BLASLONG ldc) | |||||
#else | |||||
int sbgemm_kernel_neoversev1_alpha(BLASLONG m, BLASLONG n, BLASLONG k, | |||||
FLOAT alpha, IFLOAT *A, IFLOAT *B, FLOAT *C, | |||||
BLASLONG ldc) | |||||
#endif | |||||
{ | |||||
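// the packing routines pad K up to a multiple of 8, matching ALIGN_K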
BLASLONG pad_k = (k + 7) & ~7; | |||||
svbfloat16_t ma0, ma1, mb0, mb1; | |||||
svfloat32_t mc00, mc01, mc10, mc11, vc0, vc1, vc2, vc3, oc0, oc1, oc2, oc3; | |||||
svfloat32_t tmp; | |||||
svfloat32_t svalpha = svdup_f32(alpha); | |||||
svbool_t pg16_all = svptrue_b16(); | |||||
svbool_t pg32_first_1 = svwhilelt_b32(0, 1); | |||||
svbool_t pg32_first_2 = svwhilelt_b32(0, 2); | |||||
svbool_t pg32_first_4 = svwhilelt_b32(0, 4); | |||||
svbool_t pg32_select_first_2_per_quadword = svdupq_b32(1, 1, 0, 0); | |||||
bfloat16_t *ptr_a = (bfloat16_t *)A; | |||||
bfloat16_t *ptr_b = (bfloat16_t *)B; | |||||
FLOAT *ptr_c = C; | |||||
bfloat16_t *ptr_a0; | |||||
bfloat16_t *ptr_b0; | |||||
FLOAT *ptr_c0, *ptr_c1, *ptr_c2, *ptr_c3; | |||||
for (BLASLONG j = 0; j < n / 4; j++) { | |||||
ptr_c0 = ptr_c; | |||||
ptr_c1 = ptr_c0 + ldc; | |||||
ptr_c2 = ptr_c1 + ldc; | |||||
ptr_c3 = ptr_c2 + ldc; | |||||
ptr_c += 4 * ldc; | |||||
ptr_a = (bfloat16_t *)A; | |||||
for (BLASLONG i = 0; i < m / 4; i++) { | |||||
ptr_a0 = ptr_a; | |||||
ptr_a += 4 * pad_k; | |||||
ptr_b0 = ptr_b; | |||||
INIT_C_4x4; | |||||
for (BLASLONG p = 0; p < pad_k; p += 8) { | |||||
ma0 = svld1_bf16(pg16_all, ptr_a0); | |||||
ma1 = svld1_bf16(pg16_all, ptr_a0 + 16); | |||||
mb0 = svld1_bf16(pg16_all, ptr_b0); | |||||
mb1 = svld1_bf16(pg16_all, ptr_b0 + 16); | |||||
MATMUL(0, 0); | |||||
MATMUL(0, 1); | |||||
MATMUL(1, 0); | |||||
MATMUL(1, 1); | |||||
ptr_a0 += 32; | |||||
ptr_b0 += 32; | |||||
} | |||||
ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); | |||||
ACCUMULATE_LAST4_TO_FIRST4(0, 1, tmp); | |||||
ACCUMULATE_LAST4_TO_FIRST4(1, 0, tmp); | |||||
ACCUMULATE_LAST4_TO_FIRST4(1, 1, tmp); | |||||
ZIP_EVEN_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc0); | |||||
ZIP_ODD_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc1); | |||||
ZIP_EVEN_ELEMENTS(pg32_select_first_2_per_quadword, mc01, mc11, tmp, vc2); | |||||
ZIP_ODD_ELEMENTS(pg32_select_first_2_per_quadword, mc01, mc11, tmp, vc3); | |||||
UPDATE_C(pg32_first_4, ptr_c0, oc0, vc0); | |||||
UPDATE_C(pg32_first_4, ptr_c1, oc1, vc1); | |||||
UPDATE_C(pg32_first_4, ptr_c2, oc2, vc2); | |||||
UPDATE_C(pg32_first_4, ptr_c3, oc3, vc3); | |||||
ptr_c0 += 4; | |||||
ptr_c1 += 4; | |||||
ptr_c2 += 4; | |||||
ptr_c3 += 4; | |||||
} | |||||
if (m & 2) { | |||||
ptr_a0 = ptr_a; | |||||
ptr_a += 2 * pad_k; | |||||
ptr_b0 = ptr_b; | |||||
INIT_C(0, 0); | |||||
INIT_C(0, 1); | |||||
for (BLASLONG p = 0; p < pad_k; p += 8) { | |||||
ma0 = svld1_bf16(pg16_all, ptr_a0); | |||||
mb0 = svld1_bf16(pg16_all, ptr_b0); | |||||
mb1 = svld1_bf16(pg16_all, ptr_b0 + 16); | |||||
MATMUL(0, 0); | |||||
MATMUL(0, 1); | |||||
ptr_a0 += 16; | |||||
ptr_b0 += 32; | |||||
} | |||||
ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); | |||||
ACCUMULATE_LAST4_TO_FIRST4(0, 1, tmp); | |||||
vc0 = svuzp1(mc00, mc00); | |||||
vc1 = svuzp2(mc00, mc00); | |||||
vc2 = svuzp1(mc01, mc01); | |||||
vc3 = svuzp2(mc01, mc01); | |||||
UPDATE_C(pg32_first_2, ptr_c0, oc0, vc0); | |||||
UPDATE_C(pg32_first_2, ptr_c1, oc1, vc1); | |||||
UPDATE_C(pg32_first_2, ptr_c2, oc2, vc2); | |||||
UPDATE_C(pg32_first_2, ptr_c3, oc3, vc3); | |||||
ptr_c0 += 2; | |||||
ptr_c1 += 2; | |||||
ptr_c2 += 2; | |||||
ptr_c3 += 2; | |||||
} | |||||
if (m & 1) { | |||||
ptr_a0 = ptr_a; | |||||
ptr_b0 = ptr_b; | |||||
INIT_C(0, 0); | |||||
INIT_C(0, 1); | |||||
for (BLASLONG p = 0; p < pad_k; p += 8) { | |||||
ma0 = svld1_bf16(pg16_all, ptr_a0); | |||||
mb0 = svld1_bf16(pg16_all, ptr_b0); | |||||
mb1 = svld1_bf16(pg16_all, ptr_b0 + 16); | |||||
MATMUL(0, 0); | |||||
MATMUL(0, 1); | |||||
ptr_a0 += 16; | |||||
ptr_b0 += 32; | |||||
} | |||||
ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); | |||||
ACCUMULATE_LAST4_TO_FIRST4(0, 1, tmp); | |||||
// NOTE: using svcompact here would be more straightforward | |||||
vc1 = svuzp2(mc00, mc00); | |||||
vc3 = svuzp2(mc01, mc01); | |||||
UPDATE_C(pg32_first_1, ptr_c0, oc0, mc00); | |||||
UPDATE_C(pg32_first_1, ptr_c1, oc1, vc1); | |||||
UPDATE_C(pg32_first_1, ptr_c2, oc2, mc01); | |||||
UPDATE_C(pg32_first_1, ptr_c3, oc3, vc3); | |||||
} | |||||
ptr_b += 4 * pad_k; | |||||
} | |||||
if (n & 2) { | |||||
ptr_c0 = ptr_c; | |||||
ptr_c1 = ptr_c0 + ldc; | |||||
ptr_c += 2 * ldc; | |||||
ptr_a = (bfloat16_t *)A; | |||||
for (BLASLONG i = 0; i < m / 4; i++) { | |||||
ptr_a0 = ptr_a; | |||||
ptr_a += 4 * pad_k; | |||||
ptr_b0 = ptr_b; | |||||
INIT_C(0, 0); | |||||
INIT_C(1, 0); | |||||
for (BLASLONG p = 0; p < pad_k; p += 8) { | |||||
ma0 = svld1_bf16(pg16_all, ptr_a0); | |||||
ma1 = svld1_bf16(pg16_all, ptr_a0 + 16); | |||||
mb0 = svld1_bf16(pg16_all, ptr_b0); | |||||
MATMUL(0, 0); | |||||
MATMUL(1, 0); | |||||
ptr_a0 += 32; | |||||
ptr_b0 += 16; | |||||
} | |||||
ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); | |||||
ACCUMULATE_LAST4_TO_FIRST4(1, 0, tmp); | |||||
ZIP_EVEN_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc0); | |||||
ZIP_ODD_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc2); | |||||
UPDATE_C(pg32_first_4, ptr_c0, oc0, vc0); | |||||
UPDATE_C(pg32_first_4, ptr_c1, oc2, vc2); | |||||
ptr_c0 += 4; | |||||
ptr_c1 += 4; | |||||
} | |||||
if (m & 2) { | |||||
ptr_a0 = ptr_a; | |||||
ptr_a += 2 * pad_k; | |||||
ptr_b0 = ptr_b; | |||||
INIT_C(0, 0); | |||||
for (BLASLONG p = 0; p < pad_k; p += 8) { | |||||
ma0 = svld1_bf16(pg16_all, ptr_a0); | |||||
mb0 = svld1_bf16(pg16_all, ptr_b0); | |||||
MATMUL(0, 0); | |||||
ptr_a0 += 16; | |||||
ptr_b0 += 16; | |||||
} | |||||
ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); | |||||
vc0 = svuzp1(mc00, mc00); | |||||
vc1 = svuzp2(mc00, mc00); | |||||
UPDATE_C(pg32_first_2, ptr_c0, oc0, vc0); | |||||
UPDATE_C(pg32_first_2, ptr_c1, oc1, vc1); | |||||
ptr_c0 += 2; | |||||
ptr_c1 += 2; | |||||
} | |||||
if (m & 1) { | |||||
ptr_a0 = ptr_a; | |||||
ptr_b0 = ptr_b; | |||||
INIT_C(0, 0); | |||||
for (BLASLONG p = 0; p < pad_k; p += 8) { | |||||
ma0 = svld1_bf16(pg16_all, ptr_a0); | |||||
mb0 = svld1_bf16(pg16_all, ptr_b0); | |||||
MATMUL(0, 0); | |||||
ptr_a0 += 16; | |||||
ptr_b0 += 16; | |||||
} | |||||
ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); | |||||
vc1 = svuzp2(mc00, mc00); | |||||
UPDATE_C(pg32_first_1, ptr_c0, oc0, mc00); | |||||
UPDATE_C(pg32_first_1, ptr_c1, oc1, vc1); | |||||
} | |||||
ptr_b += 2 * pad_k; | |||||
} | |||||
if (n & 1) { // TODO: this case looks like pure overhead; check whether it occurs in our use cases. | |||||
ptr_c0 = ptr_c; | |||||
ptr_a = (bfloat16_t *)A; | |||||
for (BLASLONG i = 0; i < m / 4; i++) { | |||||
ptr_a0 = ptr_a; | |||||
ptr_a += 4 * pad_k; | |||||
ptr_b0 = ptr_b; | |||||
INIT_C(0, 0); | |||||
INIT_C(1, 0); | |||||
for (BLASLONG p = 0; p < pad_k; p += 8) { | |||||
ma0 = svld1_bf16(pg16_all, ptr_a0); | |||||
ma1 = svld1_bf16(pg16_all, ptr_a0 + 16); | |||||
mb0 = svld1_bf16(pg16_all, ptr_b0); | |||||
MATMUL(0, 0); | |||||
MATMUL(1, 0); | |||||
ptr_a0 += 32; | |||||
ptr_b0 += 16; | |||||
} | |||||
ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); | |||||
ACCUMULATE_LAST4_TO_FIRST4(1, 0, tmp); | |||||
ZIP_EVEN_ELEMENTS(pg32_select_first_2_per_quadword, mc00, mc10, tmp, vc0); | |||||
UPDATE_C(pg32_first_4, ptr_c0, oc0, vc0); | |||||
ptr_c0 += 4; | |||||
} | |||||
if (m & 2) { | |||||
ptr_a0 = ptr_a; | |||||
ptr_a += 2 * pad_k; | |||||
ptr_b0 = ptr_b; | |||||
INIT_C(0, 0); | |||||
for (BLASLONG p = 0; p < pad_k; p += 8) { | |||||
ma0 = svld1_bf16(pg16_all, ptr_a0); | |||||
mb0 = svld1_bf16(pg16_all, ptr_b0); | |||||
MATMUL(0, 0); | |||||
ptr_a0 += 16; | |||||
ptr_b0 += 16; | |||||
} | |||||
ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); | |||||
vc0 = svuzp1(mc00, mc00); | |||||
UPDATE_C(pg32_first_2, ptr_c0, oc0, vc0); | |||||
ptr_c0 += 2; | |||||
} | |||||
if (m & 1) { | |||||
ptr_a0 = ptr_a; | |||||
ptr_b0 = ptr_b; | |||||
INIT_C(0, 0); | |||||
for (BLASLONG p = 0; p < pad_k; p += 8) { | |||||
ma0 = svld1_bf16(pg16_all, ptr_a0); | |||||
mb0 = svld1_bf16(pg16_all, ptr_b0); | |||||
MATMUL(0, 0); | |||||
ptr_a0 += 16; | |||||
ptr_b0 += 16; | |||||
} | |||||
ACCUMULATE_LAST4_TO_FIRST4(0, 0, tmp); | |||||
UPDATE_C(pg32_first_1, ptr_c0, oc0, mc00); | |||||
} | |||||
} | |||||
return 0; | |||||
} |
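The kernel above is built around the semantics of BFMMLA: within each 128-bit vector segment it computes C(2x2, fp32) += A(2x4, bf16) * B(2x4, bf16)^T, up to the instruction's internal rounding behavior. A scalar sketch of one segment, with illustrative helper names:

#include <stdint.h>

/* bf16 occupies the top half of an IEEE fp32, so widening is a 16-bit shift */
static float bf16_to_f32(uint16_t h) {
  union { uint32_t u; float f; } v = { (uint32_t)h << 16 };
  return v.f;
}

/* one 128-bit BFMMLA segment: c (2x2, row-major fp32) += a (2x4) * b (2x4)^T */
static void bfmmla_segment_ref(float c[4], const uint16_t a[8],
                               const uint16_t b[8]) {
  for (int i = 0; i < 2; ++i)
    for (int j = 0; j < 2; ++j)
      for (int p = 0; p < 4; ++p)
        c[2 * i + j] += bf16_to_f32(a[4 * i + p]) * bf16_to_f32(b[4 * j + p]);
}

A 256-bit Neoverse V1 vector holds two such segments, which is why the kernel folds the upper segment onto the lower one before de-interleaving the 2x2 sub-tiles into C columns.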
@@ -1,148 +0,0 @@ | |||||
/*************************************************************************** | |||||
* Copyright (c) 2024-2025, The OpenBLAS Project | |||||
* All rights reserved. | |||||
* Redistribution and use in source and binary forms, with or without | |||||
* modification, are permitted provided that the following conditions are | |||||
* met: | |||||
* 1. Redistributions of source code must retain the above copyright | |||||
* notice, this list of conditions and the following disclaimer. | |||||
* 2. Redistributions in binary form must reproduce the above copyright | |||||
* notice, this list of conditions and the following disclaimer in | |||||
* the documentation and/or other materials provided with the | |||||
* distribution. | |||||
* 3. Neither the name of the OpenBLAS project nor the names of | |||||
* its contributors may be used to endorse or promote products | |||||
* derived from this software without specific prior written permission. | |||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | |||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE | |||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |||||
* POSSIBILITY OF SUCH DAMAGE. | |||||
* *****************************************************************************/ | |||||
#include <arm_sve.h> | |||||
#include "common.h" | |||||
int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) { | |||||
IFLOAT *a_offset; | |||||
IFLOAT *a_offsetx[4]; | |||||
IFLOAT *b_offset; | |||||
a_offset = a; | |||||
b_offset = b; | |||||
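// build a bf16 zero through its bit pattern (C has no bf16 literals)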
bfloat16_t zero_value_bf16; | |||||
*((uint16_t *)(&zero_value_bf16)) = 0; | |||||
svbool_t pg16_all = svptrue_b16(); // all 16 bf16 lanes on an SVE-256 machine. | |||||
svbool_t pg16_first_8 = svwhilelt_b16(0, 8); | |||||
svbfloat16_t v0, v1, v2, v3; | |||||
svuint64_t t0, t1; | |||||
BLASLONG rest = m & 7; | |||||
svbool_t pg16_rest = svwhilelt_b16_s32(0, rest); | |||||
for (BLASLONG j = 0; j < n / 4; j++) { | |||||
a_offsetx[0] = a_offset; | |||||
a_offsetx[1] = a_offsetx[0] + lda; | |||||
a_offsetx[2] = a_offsetx[1] + lda; | |||||
a_offsetx[3] = a_offsetx[2] + lda; | |||||
a_offset += 4 * lda; | |||||
for (BLASLONG i = 0; i < m / 8; i++) { | |||||
v0 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[0]); | |||||
v1 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[1]); | |||||
v2 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[2]); | |||||
v3 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[3]); | |||||
t0 = svzip1_u64(svreinterpret_u64_bf16(v0), svreinterpret_u64_bf16(v1)); | |||||
t1 = svzip1_u64(svreinterpret_u64_bf16(v2), svreinterpret_u64_bf16(v3)); | |||||
svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0)); | |||||
svst1_bf16(pg16_all, (bfloat16_t *)b_offset + 16, | |||||
svreinterpret_bf16_u64(t1)); | |||||
a_offsetx[0] += 8; | |||||
a_offsetx[1] += 8; | |||||
a_offsetx[2] += 8; | |||||
a_offsetx[3] += 8; | |||||
b_offset += 32; | |||||
} | |||||
if (rest) { // remainder along k dim | |||||
v0 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[0]); | |||||
v1 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[1]); | |||||
v2 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[2]); | |||||
v3 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[3]); | |||||
t0 = svzip1_u64(svreinterpret_u64_bf16(v0), svreinterpret_u64_bf16(v1)); | |||||
t1 = svzip1_u64(svreinterpret_u64_bf16(v2), svreinterpret_u64_bf16(v3)); | |||||
svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0)); | |||||
svst1_bf16(pg16_all, (bfloat16_t *)b_offset + 16, | |||||
svreinterpret_bf16_u64(t1)); | |||||
b_offset += 32; | |||||
} | |||||
} | |||||
if (n & 2) { | |||||
a_offsetx[0] = a_offset; | |||||
a_offsetx[1] = a_offsetx[0] + lda; | |||||
a_offset += 2 * lda; | |||||
for (BLASLONG i = 0; i < m / 8; i++) { | |||||
v0 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[0]); | |||||
v1 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[1]); | |||||
t0 = svzip1_u64(svreinterpret_u64_bf16(v0), svreinterpret_u64_bf16(v1)); | |||||
svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0)); | |||||
b_offset += 16; | |||||
a_offsetx[0] += 8; | |||||
a_offsetx[1] += 8; | |||||
} | |||||
if (rest) { // remainder along k dim | |||||
v0 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[0]); | |||||
v1 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[1]); | |||||
t0 = svzip1_u64(svreinterpret_u64_bf16(v0), svreinterpret_u64_bf16(v1)); | |||||
svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0)); | |||||
b_offset += 16; | |||||
} | |||||
} | |||||
if (n & 1) { | |||||
a_offsetx[0] = a_offset; | |||||
for (BLASLONG i = 0; i < m / 8; i++) { | |||||
v0 = svld1_bf16(pg16_first_8, (bfloat16_t *)a_offsetx[0]); | |||||
v1 = svdup_bf16(zero_value_bf16); | |||||
t0 = svzip1_u64(svreinterpret_u64_bf16(v0), svreinterpret_u64_bf16(v1)); | |||||
svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0)); | |||||
b_offset += 16; | |||||
a_offsetx[0] += 8; | |||||
} | |||||
if (rest) { // remainder along k dim | |||||
v0 = svld1_bf16(pg16_rest, (bfloat16_t *)a_offsetx[0]); | |||||
v1 = svdup_bf16(zero_value_bf16); | |||||
t0 = svzip1_u64(svreinterpret_u64_bf16(v0), svreinterpret_u64_bf16(v1)); | |||||
svst1_bf16(pg16_all, (bfloat16_t *)b_offset, svreinterpret_bf16_u64(t0)); | |||||
} | |||||
} | |||||
return 0; | |||||
} |
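The svzip1_u64 calls above interleave two adjacent lda-strided lines in 64-bit (4 x bf16) chunks, so each 128-bit BFMMLA segment later sees [line i, k0..k3 | line i+1, k0..k3]. A scalar model of one 4-line-by-8-k tile, with an illustrative helper name:

#include <stdint.h>

/* pack a 4-line x 8-k tile: emits 32 bf16 values, lines interleaved in pairs */
static void pack_ncopy_4x8_ref(const uint16_t *a, int lda, uint16_t *b) {
  for (int pair = 0; pair < 2; ++pair)   /* line pairs {0,1} and {2,3} */
    for (int half = 0; half < 2; ++half) /* k chunks 0..3 and 4..7     */
      for (int l = 0; l < 2; ++l)        /* the two lines of the pair  */
        for (int kk = 0; kk < 4; ++kk)
          *b++ = a[(2 * pair + l) * lda + 4 * half + kk];
}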
@@ -1,361 +0,0 @@ | |||||
/*************************************************************************** | |||||
* Copyright (c) 2024-2025, The OpenBLAS Project | |||||
* All rights reserved. | |||||
* Redistribution and use in source and binary forms, with or without | |||||
* modification, are permitted provided that the following conditions are | |||||
* met: | |||||
* 1. Redistributions of source code must retain the above copyright | |||||
* notice, this list of conditions and the following disclaimer. | |||||
* 2. Redistributions in binary form must reproduce the above copyright | |||||
* notice, this list of conditions and the following disclaimer in | |||||
* the documentation and/or other materials provided with the | |||||
* distribution. | |||||
* 3. Neither the name of the OpenBLAS project nor the names of | |||||
* its contributors may be used to endorse or promote products | |||||
* derived from this software without specific prior written permission. | |||||
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | |||||
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |||||
* ARE DISCLAIMED. IN NO EVENT SHALL THE OPENBLAS PROJECT OR CONTRIBUTORS BE | |||||
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |||||
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |||||
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |||||
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |||||
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |||||
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |||||
* POSSIBILITY OF SUCH DAMAGE. | |||||
* *****************************************************************************/ | |||||
#include "common.h" | |||||
#include <arm_neon.h> | |||||
#include <arm_sve.h> | |||||
int CNAME(BLASLONG m, BLASLONG n, IFLOAT *a, BLASLONG lda, IFLOAT *b) { | |||||
BLASLONG pad_m = ((m + 7) & ~7); | |||||
BLASLONG rest = (m & 7); // rest along m dim | |||||
IFLOAT *a_offset; | |||||
IFLOAT *a_offset0, *a_offset1, *a_offset2, *a_offset3; | |||||
IFLOAT *a_offset4, *a_offset5, *a_offset6, *a_offset7; | |||||
IFLOAT *b_offset; | |||||
IFLOAT *b_offset0, *b_offset1; | |||||
a_offset = a; | |||||
b_offset = b; | |||||
svuint16_t c0, c1, c2, c3, c4, c5, c6, c7; | |||||
svuint16_t t0, t1, t2, t3; | |||||
svuint32_t m00, m01, m10, m11; | |||||
svuint64_t st_offsets_0, st_offsets_1; | |||||
svbool_t pg16_first_4 = svwhilelt_b16(0, 4); | |||||
svbool_t pg16_first_8 = svwhilelt_b16(0, 8); | |||||
svbool_t pg64_first_4 = svwhilelt_b64(0, 4); | |||||
uint32_t sizeof_u64 = 8; // the scatter offsets below are byte offsets | |||||
uint64_t _st_offsets_0[4] = { | |||||
0 * sizeof_u64, | |||||
1 * sizeof_u64, | |||||
4 * sizeof_u64, | |||||
5 * sizeof_u64, | |||||
}; | |||||
uint64_t _st_offsets_1[4] = { | |||||
2 * sizeof_u64, | |||||
3 * sizeof_u64, | |||||
6 * sizeof_u64, | |||||
7 * sizeof_u64, | |||||
}; | |||||
st_offsets_0 = svld1_u64(pg64_first_4, _st_offsets_0); | |||||
st_offsets_1 = svld1_u64(pg64_first_4, _st_offsets_1); | |||||
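/* the two tables route 64-bit lanes to destinations {0,1,4,5} and {2,3,6,7}
 * (in u64 units, expressed as byte offsets), i.e. they interleave the two
 * 2x4 sub-blocks of a packed panel */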
for (BLASLONG j = 0; j < n / 8; j++) { | |||||
a_offset0 = a_offset; | |||||
a_offset1 = a_offset0 + lda; | |||||
a_offset2 = a_offset1 + lda; | |||||
a_offset3 = a_offset2 + lda; | |||||
a_offset4 = a_offset3 + lda; | |||||
a_offset5 = a_offset4 + lda; | |||||
a_offset6 = a_offset5 + lda; | |||||
a_offset7 = a_offset6 + lda; | |||||
a_offset += 8; | |||||
b_offset0 = b_offset; | |||||
b_offset1 = b_offset0 + 4 * pad_m; | |||||
b_offset += 8 * pad_m; | |||||
for (BLASLONG i = 0; i < m / 8; i++) { | |||||
// transpose an 8x8 block and pack it into two 4x8 panels, each | |||||
// consisting of two 2x4 sub-blocks | |||||
c0 = svld1_u16(pg16_first_8, a_offset0); | |||||
c1 = svld1_u16(pg16_first_8, a_offset1); | |||||
c2 = svld1_u16(pg16_first_8, a_offset2); | |||||
c3 = svld1_u16(pg16_first_8, a_offset3); | |||||
c4 = svld1_u16(pg16_first_8, a_offset4); | |||||
c5 = svld1_u16(pg16_first_8, a_offset5); | |||||
c6 = svld1_u16(pg16_first_8, a_offset6); | |||||
c7 = svld1_u16(pg16_first_8, a_offset7); | |||||
t0 = svzip1_u16(c0, c1); | |||||
t1 = svzip1_u16(c2, c3); | |||||
t2 = svzip1_u16(c4, c5); | |||||
t3 = svzip1_u16(c6, c7); | |||||
m00 = svzip1_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1)); | |||||
m10 = svzip2_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1)); | |||||
m01 = svzip1_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3)); | |||||
m11 = svzip2_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3)); | |||||
svst1_scatter_u64offset_u64(pg64_first_4, (uint64_t *)b_offset0, | |||||
st_offsets_0, svreinterpret_u64_u32(m00)); | |||||
svst1_scatter_u64offset_u64(pg64_first_4, (uint64_t *)b_offset0, | |||||
st_offsets_1, svreinterpret_u64_u32(m01)); | |||||
svst1_scatter_u64offset_u64(pg64_first_4, (uint64_t *)b_offset1, | |||||
st_offsets_0, svreinterpret_u64_u32(m10)); | |||||
svst1_scatter_u64offset_u64(pg64_first_4, (uint64_t *)b_offset1, | |||||
st_offsets_1, svreinterpret_u64_u32(m11)); | |||||
a_offset0 += 8 * lda; | |||||
a_offset1 += 8 * lda; | |||||
a_offset2 += 8 * lda; | |||||
a_offset3 += 8 * lda; | |||||
a_offset4 += 8 * lda; | |||||
a_offset5 += 8 * lda; | |||||
a_offset6 += 8 * lda; | |||||
a_offset7 += 8 * lda; | |||||
b_offset0 += 32; | |||||
b_offset1 += 32; | |||||
} | |||||
if (rest) { | |||||
c0 = svld1_u16(pg16_first_8, a_offset0); | |||||
c1 = (rest >= 2 ? svld1_u16(pg16_first_8, a_offset1) : svdup_u16(0)); | |||||
c2 = (rest >= 3 ? svld1_u16(pg16_first_8, a_offset2) : svdup_u16(0)); | |||||
c3 = (rest >= 4 ? svld1_u16(pg16_first_8, a_offset3) : svdup_u16(0)); | |||||
c4 = (rest >= 5 ? svld1_u16(pg16_first_8, a_offset4) : svdup_u16(0)); | |||||
c5 = (rest >= 6 ? svld1_u16(pg16_first_8, a_offset5) : svdup_u16(0)); | |||||
c6 = (rest == 7 ? svld1_u16(pg16_first_8, a_offset6) : svdup_u16(0)); | |||||
c7 = (svdup_u16(0)); | |||||
t0 = svzip1_u16(c0, c1); | |||||
t1 = svzip1_u16(c2, c3); | |||||
t2 = svzip1_u16(c4, c5); | |||||
t3 = svzip1_u16(c6, c7); | |||||
m00 = svzip1_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1)); | |||||
m10 = svzip2_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1)); | |||||
m01 = svzip1_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3)); | |||||
m11 = svzip2_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3)); | |||||
svst1_scatter_u64offset_u64(pg64_first_4, (uint64_t *)b_offset0, | |||||
st_offsets_0, svreinterpret_u64_u32(m00)); | |||||
svst1_scatter_u64offset_u64(pg64_first_4, (uint64_t *)b_offset0, | |||||
st_offsets_1, svreinterpret_u64_u32(m01)); | |||||
svst1_scatter_u64offset_u64(pg64_first_4, (uint64_t *)b_offset1, | |||||
st_offsets_0, svreinterpret_u64_u32(m10)); | |||||
svst1_scatter_u64offset_u64(pg64_first_4, (uint64_t *)b_offset1, | |||||
st_offsets_1, svreinterpret_u64_u32(m11)); | |||||
} | |||||
} | |||||
if (n & 4) { | |||||
a_offset0 = a_offset; | |||||
a_offset1 = a_offset0 + lda; | |||||
a_offset2 = a_offset1 + lda; | |||||
a_offset3 = a_offset2 + lda; | |||||
a_offset4 = a_offset3 + lda; | |||||
a_offset5 = a_offset4 + lda; | |||||
a_offset6 = a_offset5 + lda; | |||||
a_offset7 = a_offset6 + lda; | |||||
a_offset += 4; | |||||
b_offset0 = b_offset; | |||||
b_offset += 4 * pad_m; | |||||
for (BLASLONG i = 0; i < m / 8; i++) { | |||||
// transpose an 8x4 block and pack it into one 4x8 panel consisting | |||||
// of two 2x4 sub-blocks | |||||
c0 = svld1_u16(pg16_first_4, a_offset0); | |||||
c1 = svld1_u16(pg16_first_4, a_offset1); | |||||
c2 = svld1_u16(pg16_first_4, a_offset2); | |||||
c3 = svld1_u16(pg16_first_4, a_offset3); | |||||
c4 = svld1_u16(pg16_first_4, a_offset4); | |||||
c5 = svld1_u16(pg16_first_4, a_offset5); | |||||
c6 = svld1_u16(pg16_first_4, a_offset6); | |||||
c7 = svld1_u16(pg16_first_4, a_offset7); | |||||
t0 = svzip1_u16(c0, c1); | |||||
t1 = svzip1_u16(c2, c3); | |||||
t2 = svzip1_u16(c4, c5); | |||||
t3 = svzip1_u16(c6, c7); | |||||
m00 = svzip1_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1)); | |||||
m01 = svzip1_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3)); | |||||
svst1_scatter_u64offset_u64(pg64_first_4, (uint64_t *)b_offset0, | |||||
st_offsets_0, svreinterpret_u64_u32(m00)); | |||||
svst1_scatter_u64offset_u64(pg64_first_4, (uint64_t *)b_offset0, | |||||
st_offsets_1, svreinterpret_u64_u32(m01)); | |||||
a_offset0 += 8 * lda; | |||||
a_offset1 += 8 * lda; | |||||
a_offset2 += 8 * lda; | |||||
a_offset3 += 8 * lda; | |||||
a_offset4 += 8 * lda; | |||||
a_offset5 += 8 * lda; | |||||
a_offset6 += 8 * lda; | |||||
a_offset7 += 8 * lda; | |||||
b_offset0 += 32; | |||||
} | |||||
if (rest) { | |||||
c0 = svld1_u16(pg16_first_4, a_offset0); // rest >= 1 | |||||
c1 = (rest >= 2 ? svld1_u16(pg16_first_4, a_offset1) : svdup_u16(0)); | |||||
c2 = (rest >= 3 ? svld1_u16(pg16_first_4, a_offset2) : svdup_u16(0)); | |||||
c3 = (rest >= 4 ? svld1_u16(pg16_first_4, a_offset3) : svdup_u16(0)); | |||||
c4 = (rest >= 5 ? svld1_u16(pg16_first_4, a_offset4) : svdup_u16(0)); | |||||
c5 = (rest >= 6 ? svld1_u16(pg16_first_4, a_offset5) : svdup_u16(0)); | |||||
c6 = (rest == 7 ? svld1_u16(pg16_first_4, a_offset6) : svdup_u16(0)); | |||||
c7 = (svdup_u16(0)); | |||||
t0 = svzip1_u16(c0, c1); | |||||
t1 = svzip1_u16(c2, c3); | |||||
t2 = svzip1_u16(c4, c5); | |||||
t3 = svzip1_u16(c6, c7); | |||||
m00 = svzip1_u32(svreinterpret_u32_u16(t0), svreinterpret_u32_u16(t1)); | |||||
m01 = svzip1_u32(svreinterpret_u32_u16(t2), svreinterpret_u32_u16(t3)); | |||||
svst1_scatter_u64offset_u64(pg64_first_4, (uint64_t *)b_offset0, | |||||
st_offsets_0, svreinterpret_u64_u32(m00)); | |||||
svst1_scatter_u64offset_u64(pg64_first_4, (uint64_t *)b_offset0, | |||||
st_offsets_1, svreinterpret_u64_u32(m01)); | |||||
} | |||||
} | |||||
if (n & 2) { | |||||
a_offset0 = a_offset; | |||||
a_offset1 = a_offset0 + lda; | |||||
a_offset2 = a_offset1 + lda; | |||||
a_offset3 = a_offset2 + lda; | |||||
a_offset4 = a_offset3 + lda; | |||||
a_offset5 = a_offset4 + lda; | |||||
a_offset6 = a_offset5 + lda; | |||||
a_offset7 = a_offset6 + lda; | |||||
a_offset += 2; | |||||
b_offset0 = b_offset; | |||||
b_offset1 = b_offset0 + 8; | |||||
b_offset += 2 * pad_m; | |||||
for (BLASLONG i = 0; i < m / 8; i++) { | |||||
for (BLASLONG line = 0; line < 2; line++) { | |||||
b_offset0[line * 4] = a_offset0[line]; | |||||
b_offset0[line * 4 + 1] = a_offset1[line]; | |||||
b_offset0[line * 4 + 2] = a_offset2[line]; | |||||
b_offset0[line * 4 + 3] = a_offset3[line]; | |||||
b_offset1[line * 4] = a_offset4[line]; | |||||
b_offset1[line * 4 + 1] = a_offset5[line]; | |||||
b_offset1[line * 4 + 2] = a_offset6[line]; | |||||
b_offset1[line * 4 + 3] = a_offset7[line]; | |||||
} | |||||
b_offset0 += 16; | |||||
b_offset1 += 16; | |||||
a_offset0 += 8 * lda; | |||||
a_offset1 += 8 * lda; | |||||
a_offset2 += 8 * lda; | |||||
a_offset3 += 8 * lda; | |||||
a_offset4 += 8 * lda; | |||||
a_offset5 += 8 * lda; | |||||
a_offset6 += 8 * lda; | |||||
a_offset7 += 8 * lda; | |||||
} | |||||
if (rest) { | |||||
for (BLASLONG line = 0; line < 2; line++) { | |||||
b_offset0[line * 4] = a_offset0[line]; | |||||
b_offset0[line * 4 + 1] = rest == 1 ? 0 : a_offset1[line]; | |||||
b_offset0[line * 4 + 2] = rest <= 2 ? 0 : a_offset2[line]; | |||||
b_offset0[line * 4 + 3] = rest <= 3 ? 0 : a_offset3[line]; | |||||
b_offset1[line * 4] = rest <= 4 ? 0 : a_offset4[line]; | |||||
b_offset1[line * 4 + 1] = rest <= 5 ? 0 : a_offset5[line]; | |||||
b_offset1[line * 4 + 2] = rest <= 6 ? 0 : a_offset6[line]; | |||||
b_offset1[line * 4 + 3] = 0; | |||||
} | |||||
} | |||||
} | |||||
if (n & 1) { | |||||
a_offset0 = a_offset; | |||||
a_offset1 = a_offset0 + lda; | |||||
a_offset2 = a_offset1 + lda; | |||||
a_offset3 = a_offset2 + lda; | |||||
a_offset4 = a_offset3 + lda; | |||||
a_offset5 = a_offset4 + lda; | |||||
a_offset6 = a_offset5 + lda; | |||||
a_offset7 = a_offset6 + lda; | |||||
for (BLASLONG i = 0; i < m / 8; i++) { | |||||
b_offset[0] = a_offset0[0]; | |||||
b_offset[1] = a_offset1[0]; | |||||
b_offset[2] = a_offset2[0]; | |||||
b_offset[3] = a_offset3[0]; | |||||
b_offset[4] = 0; | |||||
b_offset[5] = 0; | |||||
b_offset[6] = 0; | |||||
b_offset[7] = 0; | |||||
b_offset[8] = a_offset4[0]; | |||||
b_offset[9] = a_offset5[0]; | |||||
b_offset[10] = a_offset6[0]; | |||||
b_offset[11] = a_offset7[0]; | |||||
b_offset[12] = 0; | |||||
b_offset[13] = 0; | |||||
b_offset[14] = 0; | |||||
b_offset[15] = 0; | |||||
b_offset += 16; | |||||
a_offset0 += 8 * lda; | |||||
a_offset1 += 8 * lda; | |||||
a_offset2 += 8 * lda; | |||||
a_offset3 += 8 * lda; | |||||
a_offset4 += 8 * lda; | |||||
a_offset5 += 8 * lda; | |||||
a_offset6 += 8 * lda; | |||||
a_offset7 += 8 * lda; | |||||
} | |||||
if (rest) { | |||||
b_offset[0] = *a_offset0; | |||||
b_offset[1] = rest == 1 ? 0 : *a_offset1; | |||||
b_offset[2] = rest <= 2 ? 0 : *a_offset2; | |||||
b_offset[3] = rest <= 3 ? 0 : *a_offset3; | |||||
b_offset[4] = 0; | |||||
b_offset[5] = 0; | |||||
b_offset[6] = 0; | |||||
b_offset[7] = 0; | |||||
b_offset[8] = rest <= 4 ? 0 : *a_offset4; | |||||
b_offset[9] = rest <= 5 ? 0 : *a_offset5; | |||||
b_offset[10] = rest <= 6 ? 0 : *a_offset6; | |||||
b_offset[11] = 0; | |||||
b_offset[12] = 0; | |||||
b_offset[13] = 0; | |||||
b_offset[14] = 0; | |||||
b_offset[15] = 0; | |||||
} | |||||
} | |||||
return 0; | |||||
} |
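One subtlety in the scatter stores above: svst1_scatter_u64offset_u64 takes byte offsets, which is why the offset tables are built from multiples of 8 (sizeof_u64). A minimal sketch of the idiom, with an illustrative function name:

#include <arm_sve.h>
#include <stdint.h>

/* writes src[0..3] to dst[0], dst[1], dst[4] and dst[5] via byte offsets */
static void scatter_4_u64(uint64_t *dst, const uint64_t *src) {
  svbool_t pg = svwhilelt_b64(0, 4);
  uint64_t off[4] = {0 * 8, 1 * 8, 4 * 8, 5 * 8}; /* bytes, not lane indices */
  svuint64_t vo = svld1_u64(pg, off);
  svuint64_t v = svld1_u64(pg, src);
  svst1_scatter_u64offset_u64(pg, dst, vo, v);
}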
@@ -3598,15 +3598,15 @@ is a big desktop or server with abundant cache rather than a phone or embedded d | |||||
#undef BGEMM_ALIGN_K | #undef BGEMM_ALIGN_K | ||||
#undef BGEMM_DEFAULT_UNROLL_M | #undef BGEMM_DEFAULT_UNROLL_M | ||||
#undef BGEMM_DEFAULT_UNROLL_N | #undef BGEMM_DEFAULT_UNROLL_N | ||||
#define BGEMM_ALIGN_K 8 | |||||
#define BGEMM_ALIGN_K 4 | |||||
#define BGEMM_DEFAULT_UNROLL_M 8 | |||||
#define BGEMM_DEFAULT_UNROLL_M 4 | |||||
#define BGEMM_DEFAULT_UNROLL_N 4 | #define BGEMM_DEFAULT_UNROLL_N 4 | ||||
#undef SBGEMM_ALIGN_K | #undef SBGEMM_ALIGN_K | ||||
#undef SBGEMM_DEFAULT_UNROLL_M | #undef SBGEMM_DEFAULT_UNROLL_M | ||||
#undef SBGEMM_DEFAULT_UNROLL_N | #undef SBGEMM_DEFAULT_UNROLL_N | ||||
#define SBGEMM_ALIGN_K 8 | |||||
#define SBGEMM_ALIGN_K 4 | |||||
#define SBGEMM_DEFAULT_UNROLL_M 4 | |||||
#define SBGEMM_DEFAULT_UNROLL_M 8 | |||||
#define SBGEMM_DEFAULT_UNROLL_N 4 | #define SBGEMM_DEFAULT_UNROLL_N 4 | ||||
#define SGEMM_DEFAULT_UNROLL_M 16 | #define SGEMM_DEFAULT_UNROLL_M 16 | ||||
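*_ALIGN_K sets the granularity to which the packed K dimension is rounded up; the deleted kernel above used pad_k = (k + 7) & ~7, i.e. ALIGN_K 8. The general rounding, as a hypothetical helper (not an OpenBLAS macro):

/* round k up to a multiple of a power-of-two 'align' */
#define ROUND_UP_K(k, align) (((k) + (align) - 1) & ~((align) - 1))
/* e.g. ROUND_UP_K(13, 8) == 16 and ROUND_UP_K(13, 4) == 16 */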